diff --git a/spaces/1368565466ki/ZSTRD/mel_processing.py b/spaces/1368565466ki/ZSTRD/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/ZSTRD/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec
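For reference, a minimal usage sketch of the module removed above, assuming it is importable as `mel_processing`. The parameter values are illustrative 22.05 kHz VITS-style defaults, not settings taken from this Space; note also that recent PyTorch releases require `return_complex` in `torch.stft` and recent librosa requires keyword arguments to `librosa.filters.mel`, both of which this older file predates.

```python
# Hypothetical usage sketch (not part of the deleted file); values are
# illustrative VITS-style defaults for 22.05 kHz audio.
import torch
from mel_processing import spectrogram_torch, spec_to_mel_torch, mel_spectrogram_torch

wav = torch.clamp(torch.randn(1, 22050), -1.0, 1.0)  # 1 s of audio scaled to [-1, 1]

# Linear-magnitude spectrogram, then mel projection with log compression.
spec = spectrogram_torch(wav, n_fft=1024, sampling_rate=22050,
                         hop_size=256, win_size=1024, center=False)
mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=80,
                        sampling_rate=22050, fmin=0.0, fmax=None)

# Equivalent single call.
mel2 = mel_spectrogram_torch(wav, n_fft=1024, num_mels=80, sampling_rate=22050,
                             hop_size=256, win_size=1024, fmin=0.0, fmax=None)
print(mel.shape, mel2.shape)  # both (1, 80, num_frames)
```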
diff --git a/spaces/17TheWord/vits-models/transforms.py b/spaces/17TheWord/vits-models/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/vits-models/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
-                                           unnormalized_widths,
-                                           unnormalized_heights,
-                                           unnormalized_derivatives,
-                                           inverse=False,
-                                           tails=None,
-                                           tail_bound=1.,
-                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {
-            'tails': tails,
-            'tail_bound': tail_bound
-        }
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(
-        inputs[..., None] >= bin_locations,
-        dim=-1
-    ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails='linear',
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == 'linear':
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative
-    )
-
-    return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
-                              unnormalized_widths,
-                              unnormalized_heights,
-                              unnormalized_derivatives,
-                              inverse=False,
-                              left=0., right=1., bottom=0., top=1.,
-                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                              min_derivative=DEFAULT_MIN_DERIVATIVE):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError('Input to a transform is not within its domain')
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError('Minimal bin width too large for the number of bins')
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError('Minimal bin height too large for the number of bins')
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (((inputs - input_cumheights) * (input_derivatives
-                                             + input_derivatives_plus_one
-                                             - 2 * input_delta)
-              + input_heights * (input_delta - input_derivatives)))
-        b = (input_heights * input_derivatives
-             - (inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta))
-        c = - input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - root).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (input_delta * theta.pow(2)
-                                     + input_derivatives * theta_one_minus_theta)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - theta).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, logabsdet
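Similarly, a minimal round-trip sketch of the spline flow removed above, assuming the file is importable as `transforms`; the batch shape, bin count, and tail bound are illustrative assumptions, not values from this Space.

```python
# Hypothetical usage sketch (not part of the deleted file).
import torch
from transforms import piecewise_rational_quadratic_transform

num_bins = 10
x = torch.randn(4, 16)                     # batch of scalar inputs
widths = torch.randn(4, 16, num_bins)      # unnormalized bin widths
heights = torch.randn(4, 16, num_bins)     # unnormalized bin heights
derivs = torch.randn(4, 16, num_bins - 1)  # interior knot derivatives; 'linear'
                                           # tails pad the two boundary slopes

y, logabsdet = piecewise_rational_quadratic_transform(
    x, widths, heights, derivs, inverse=False, tails='linear', tail_bound=5.0)
x_back, _ = piecewise_rational_quadratic_transform(
    y, widths, heights, derivs, inverse=True, tails='linear', tail_bound=5.0)
print(torch.allclose(x, x_back, atol=1e-4))  # round-trip recovers the input
```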
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cocktail Movie Full In Tamil Hd 1080p The Ultimate Comedy Thriller of 2020.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cocktail Movie Full In Tamil Hd 1080p The Ultimate Comedy Thriller of 2020.md
deleted file mode 100644
index 7e41b658fde5cd781b22e654b9b235ecc97c0485..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cocktail Movie Full In Tamil Hd 1080p The Ultimate Comedy Thriller of 2020.md
+++ /dev/null
@@ -1,30 +0,0 @@
-<br />
-

Logplot 7 Keygen Gnawa Mathematica Ba

-

If you are looking for a way to plot geotechnical, environmental, geophysical, mud/gas, and mining data as a graphic boring log, you might have heard of Logplot 7. This software is an easy-to-use log plotting program with a flexible log layout and intuitive data editor. But what is Logplot 7 Keygen Gnawa Mathematica Ba? And how can you use it to activate your Logplot 7 software? In this article, we will answer these questions and more. We will also give you some tips and tricks for using Logplot 7 effectively.

-

What is Logplot 7?

-

Logplot 7 is software developed by RockWare, Inc., a company that provides geoscientific software and consulting services. Logplot 7 has been used by geoscientists since 1983 to display their data as a graphic boring log. Logplot 7 can plot single-page logs for shallow borings or multi-page/continuous logs for deep wells. You can share your logs with clients via PDF or post HTML log pages on your website, and export your logs to JPG, BMP, TIFF, and PNG images.

-

Logplot 7 Keygen gnawa mathematica ba


Download: https://byltly.com/2uKvDX



-

Features and benefits of Logplot 7

-

Some of the features and benefits of Logplot 7 are:

- -

How to install and license Logplot 7

-

To install Logplot 7, you need to download the setup file from the RockWare website or from a trusted source. Then you need to run the setup file and follow the instructions on the screen. You can choose between a single license or a network license depending on your needs. A single license allows you to install Logplot 7 on one computer only. A network license allows you to install Logplot 7 on multiple computers within a local area network (LAN).

-

To license Logplot 7, you need to purchase a serial number from RockWare or from an authorized reseller. Then you need to enter the serial number in the License Manager window that appears when you run Logplot 7 for the first time. You can also access the License Manager window from the Help menu at any time. Once you enter the serial number and click OK, your Logplot 7 software will be activated.

-

What is Gnawa Mathematica Ba?

-

Gnawa Mathematica Ba is a phrase that has no clear meaning or origin. It seems to be a combination of words from different languages and contexts. However, some people have speculated that it might have something to do with Gnawa music, Mathematica software, and Ba Gua Zhang martial art.

-

The origin and meaning of Gnawa Mathematica Ba

-

Gnawa music is a type of music that originated in Morocco and Algeria. It is played by the Gnawa people who are descendants of former slaves from sub-Saharan Africa. Gnawa music combines Islamic religious songs with African rhythms and instruments such as the guembri (a three-stringed lute), the krakebs (metal castanets), and the tbel (a large drum). Gnawa music is used for healing rituals called lila or derdeba where participants enter trance states induced by music and dance.

-

Mathematica is software developed by Wolfram Research that provides a platform for computation, visualization, programming, documentation, and deployment. Mathematica can perform symbolic and numerical calculations on various types of data such as numbers, functions, matrices, graphs, images, and sounds.

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Any Video Convertor 3.2.7 Crack().md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Any Video Convertor 3.2.7 Crack().md
deleted file mode 100644
index 71917bca60c4641678efd22bc829c96a7a3dd404..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Any Video Convertor 3.2.7 Crack().md
+++ /dev/null
@@ -1,45 +0,0 @@
-<br />
-

HD Online Player (Any Video Convertor 3.2.7 Crack~~())

-

Do you want to watch your favorite videos in high definition online? Do you want to convert any video format to HD online without losing quality? Do you want to download and stream videos from various websites with ease? If you answered yes to any of these questions, then you might be interested in Any Video Convertor 3.2.7 Crack~~(). In this article, we will tell you everything you need to know about this software, including its features, how to use it, and its pros and cons.

-

HD Online Player (Any Video Convertor 3.2.7 Crack~~()


Download File ✏ ✏ ✏ https://byltly.com/2uKwkl



-

Introduction

-

Before we dive into the details of Any Video Convertor 3.2.7 Crack~~(), let's first understand what it is and why you might need it.

-

What is HD Online Player?

-

HD Online Player is a feature of Any Video Convertor that allows you to watch your converted videos in high definition online. You can either upload your videos to a cloud service like Google Drive or Dropbox, or use a URL link to stream them from any website. You can also share your videos with your friends or family via email or social media.

-

What is Any Video Convertor?

-

Any Video Convertor is a popular video conversion software that can convert any video format to any other video format, including HD online formats like MP4, MKV, AVI, WMV, FLV, etc. It can also convert videos for various devices like iPhone, iPad, Android, PSP, Xbox, etc. It supports over 200 video formats and codecs, and can handle batch conversion with high speed and quality.

-

-

Why do you need Any Video Convertor 3.2.7 Crack~~()?

-

Any Video Convertor is a paid software that requires a license key to activate its full features. However, some people may not want to pay for it or may not have access to it due to geographical restrictions or other reasons. That's why they may look for a cracked version of the software that can bypass the activation process and unlock all the features for free. Any Video Convertor 3.2.7 Crack~~() is one such cracked version that claims to offer all the benefits of the original software without any limitations.

-

Features of Any Video Convertor 3.2.7 Crack~~()

-

Any Video Convertor 3.2.7 Crack~~() has many features that make it a powerful and versatile video conversion tool. Here are some of them:

-

Convert any video format to HD online

-

With Any Video Convertor 3.2.7 Crack~~(), you can convert any video format to HD online with ease. You can choose from various output formats and quality settings, such as 1080p, 720p, 480p, etc., depending on your preference and internet speed. You can also adjust the video parameters like bitrate, frame rate, resolution, aspect ratio, etc., to optimize the conversion result.

-

Download and stream videos from various websites

-

Any Video Convertor 3.2.7 Crack~~() also allows you to download and stream videos from various websites like YouTube, Facebook, Vimeo, Dailymotion, etc., with just a few clicks. You can either paste the URL of the video you want to download or convert, or use the built-in browser to search for videos online. You can also choose the format and quality of the downloaded or streamed video according to your needs.

-

Edit and enhance your videos with built-in tools

-

If you want to edit and enhance your videos before converting them to HD online, you can use the built-in tools of Any Video Convertor 3.2.7 Crack~~(). You can crop, trim, rotate, flip, merge, split, watermark, subtitle, add effects, etc., to your videos with ease. You can also adjust the brightness, contrast, saturation, hue, etc., of your videos to improve their appearance.

-

Burn videos to DVD or Blu-ray discs

-

If you want to burn your converted videos to DVD or Blu-ray discs for backup or playback on TV or other devices, you can do so with Any Video Convertor 3.2.7 Crack~~(). You can choose from various DVD or Blu-ray menu templates and customize them with your own text, images, music, etc. You can also set the DVD or Blu-ray parameters like disc type, TV standard, video mode, etc., according to your needs.
-
Extract audio from video files
-

If you want to extract audio from video files for listening on music players or other devices, you can do so with Any Video Convertor 3.2.7 Crack~~(). You can choose from various audio formats and quality settings, such as MP3, WAV, AAC, M4A, etc., depending on your preference and device compatibility. You can also adjust the audio parameters like bitrate, sample rate, channels, etc., to optimize the extraction result.

-

How to use Any Video Convertor 3.2.7 Crack~~()

-

To use Any Video Convertor 3.2.7 Crack~~(), you need to follow these simple steps:

- -
  • What are the alternatives to Any Video Convertor 3.2.7 Crack~~()?
  • -

    There are many alternatives to Any Video Convertor 3.2.7 Crack~~() that are safe and legal to use. Some of them are:

    - -
  • How can I watch HD online videos without converting them?
  • -

    If you don't want to convert your videos to HD online formats, you can still watch them in high definition online with some other methods. Some of them are:

    - -
  • How can I improve the quality of my videos before converting them to HD online?
  • -

    If you want to improve the quality of your videos before converting them to HD online, you can use some tips and tricks. Some of them are:

    - -
  • What are the benefits of watching videos in HD online?
  • -

    Watching videos in HD online has many benefits that make it a worthwhile experience. Some of them are:

    - -

    0a6ba089eb
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Doulci-Activator-V2-3-With-Key-Extra-Quality.md b/spaces/1gistliPinn/ChatGPT4/Doulci-Activator-V2-3-With-Key-Extra-Quality.md
deleted file mode 100644
index 15e141abd9a1dc9bb2053e08622a95e58c6626ae..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Doulci-Activator-V2-3-With-Key-Extra-Quality.md
+++ /dev/null
@@ -1,45 +0,0 @@
-Doulci Activator v2 3 with key
-
-
-
-LINK ===> [https://gohhs.com/2tvp8V](https://gohhs.com/2tvp8V)
-
-
-
-
-
-
-
-
-
-Here is a possible title and article with HTML formatting for the keyword "Doulci Activator v2 3 with key":
-
-Doulci Activator v2 3 with key: A Free Tool to Bypass iCloud Activation Lock
-If you have an Apple device that is locked by iCloud activation and you don't remember your Apple ID and password, you may be looking for a way to unlock it. One of the tools that claims to do this is Doulci Activator v2 3 with key, a free software that simulates Apple's servers and bypasses the activation lock.
-But what is Doulci Activator v2 3 with key and how does it work? Is it safe and reliable? And where can you download it? In this article, we will answer these questions and provide you with some alternatives to Doulci Activator v2 3 with key.
-
-What is Doulci Activator v2 3 with key?
-Doulci Activator v2 3 with key is a version of Doulci Activator, a software that was developed by a team of hackers in 2014. Doulci Activator exploits a vulnerability in Apple's iCloud system that allows it to create a fake server and trick the device into thinking that it is connected to Apple's official server. This way, it can bypass the iCloud activation lock and let the user access the device without entering the Apple ID and password.
-Doulci Activator v2 3 with key is supposed to be compatible with Windows, Mac, and Linux operating systems. It also claims to support all iOS devices and iOS versions up to iOS 7.1.2. However, there is no official website or source for Doulci Activator v2 3 with key, and most of the download links that are available online are either broken or contain malware. Therefore, it is not recommended to use Doulci Activator v2 3 with key or any other version of Doulci Activator.
-
-How to use Doulci Activator v2 3 with key?
-If you still want to try Doulci Activator v2 3 with key at your own risk, here are the steps that you need to follow:
-
-Download Doulci Activator v2 3 with key from a reliable source and unzip the files.
-Install the script on a local server and edit your hosts file to redirect your device to the fake server.
-Connect your device to your computer via USB cable and launch iTunes.
-Wait for a few minutes until Doulci Activator v2 3 with key bypasses the iCloud activation lock and activates your device.
-Disconnect your device from your computer and enjoy using it.
-
-
-What are the drawbacks of using Doulci Activator v2 3 with key?
-Although Doulci Activator v2 3 with key may sound like a tempting solution for iCloud activation lock, it has many drawbacks that make it unreliable and risky. Some of them are:
-
-Doulci Activator v2 3 with key does not remove the iCloud activation lock permanently. It only bypasses it temporarily until you restart your device or connect it to another network.
-Doulci Activator v2 3 with key does not work for iOS versions higher than iOS 7.1.2. If you have a newer iOS version, you will not be able to use Doulci Activator v2 3 with key or any other version of Doulci Activator.
-Doulci Activator v2 3 with key may contain viruses or malware that can harm your computer or device. Since there is no official source for Doulci Activator v2 3 with key, you cannot trust any download link that you find online.
-Doulci Activator v2 3 with key may not work for all devices or situations. Some users have reported that Doulci Activator v2 3 with key failed to bypass the iCloud activation lock or caused other problems on their devices.
-Doulci Activator v2 3 with key may violate Apple's terms of service and privacy policy. By using Doulci Activator v2 3 with key, you may be breaking the law or dfd1c89656
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Das Haus Anubis Staffel 1 Folge 1080p NEW!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Das Haus Anubis Staffel 1 Folge 1080p NEW!.md
deleted file mode 100644
index a5ea7c062ca9e56484d42de3296995f32f291c7d..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Das Haus Anubis Staffel 1 Folge 1080p NEW!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-<br />
    -

So if you want to watch the episodes, I recommend dashausanubis.de; on anubis-haus.de.tl you can watch them as well. On nickelodeon.de the old episodes are no longer available, only the new ones from season 2. I hope I could help, DiggiDou!

    -

Does anyone know where I can watch season 1 of Haus Anubis??? On dashausanubis.de the first season is no longer available, and on MyVideo and YouTube I can't find any videos either; I'm already at episode 64 and it is no longer on YouTube.

    -

    das haus anubis staffel 1 folge 1080p


Download File: https://imgfil.com/2uxZAp



    aaccfb2cb3
    -
    -
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/README.md b/spaces/1line/AutoGPT/README.md
deleted file mode 100644
index 5bf09b995f04f7af05d1314906b1b1ff39c20ddc..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AutoGPT
-emoji: 🦾
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.27.0
-app_file: ui/app.py
-pinned: false
-license: mit
-duplicated_from: aliabid94/AutoGPT
----
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Domino 39s Free Download.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Domino 39s Free Download.md
deleted file mode 100644
index aa3007b261dc356b1e6f1888ba6b66e27d9ae519..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Domino 39s Free Download.md
+++ /dev/null
@@ -1,78 +0,0 @@
-<br />
    -

    Domino's Download: How to Order Pizza from Your Phone

    -

    Do you love pizza? Do you love convenience? Do you love saving money? If you answered yes to any of these questions, then you need to download the Domino's app on your phone. Domino's download is the easiest way to order delicious pizza from anywhere, anytime. Whether you want delivery, carryout, or dine-in, you can use the app to customize your pizza, browse the menu, check out, and track your order. Plus, you can enjoy exclusive offers, rewards, and features that make ordering pizza even more fun. In this article, we will show you how to download the Domino's app for Android or iOS devices, how to order pizza from the app, and what benefits you can get from using it.

    -

    Introduction

    -

    Domino's is one of the most popular pizza chains in the world, with over 17,000 stores in more than 90 countries. Domino's is known for its fresh ingredients, fast delivery, and innovative technology. One of the best examples of Domino's technology is its mobile app, which allows you to order pizza from your phone in just a few taps. The app is free to download and easy to use. Here's how you can get started:

    -

domino's download


    Download Zip ⇒⇒⇒ https://urlin.us/2uT2Mm



    - -

    Congratulations! You have successfully downloaded the Domino's app on your phone. Now you are ready to order some pizza.

    -

    How to Order Pizza from the Domino's App

    -

    Ordering pizza from the Domino's app is simple and convenient. You can follow these steps:

    -

    Choose Your Location and Delivery Method

    -

    The first thing you need to do is choose your location and delivery method. You can do this by:

    - -

    Once you have chosen your location and delivery method, you can proceed to build your pizza or choose from the menu.

    -

    Build Your Pizza or Choose from the Menu

    -

    The next thing you need to do is build your pizza or choose from the menu. You can do this by:

    - -

    Besides pizza, you can also order sides, drinks, and desserts from the Domino's app. You can find them under the "Sides", "Drinks", and "Desserts" tabs at the bottom of the screen. You can add them to your order in the same way as pizza.

    -

    Check Out and Track Your Order

    -

    The last thing you need to do is check out and track your order. You can do this by:

    - -

    That's it! You have successfully ordered pizza from the Domino's app. Now you can sit back and enjoy your pizza.

    -

    -

    Benefits of Using the Domino's App

    -

    Ordering pizza from the Domino's app is not only easy and convenient, but also rewarding and fun. Here are some of the benefits of using the Domino's app:

    -

    Save Time and Money

    -

    With the Domino's app, you can save time and money by:

    - -

    Enjoy Convenience and Flexibility

    -

    With the Domino's app, you can enjoy convenience and flexibility by:

    - -

    Access Exclusive Offers and Features

    -

    With the Domino's app, you can access exclusive offers and features by:

    - -

    Conclusion

    -

    In conclusion, Domino's download is the best way to order pizza from your phone. You can download the Domino's app for Android or iOS devices and use it to order pizza from anywhere, anytime. You can customize your pizza, browse the menu, check out, and track your order with just a few taps. You can also save time and money, enjoy convenience and flexibility, and access exclusive offers and features with the app. So what are you waiting for? Download the Domino's app today and enjoy the best pizza experience ever.

    -

    FAQs

    -

    Is the Domino's app free to download?

    -

    Yes, the Domino's app is free to download and use. You can find it on Google Play or App Store for your Android or iOS devices.

    -

    What are the minimum requirements for the Domino's app?

    -

    The minimum requirements for the Domino's app vary depending on your device and operating system. However, generally speaking, you need to have at least 100 MB of free space, a stable internet connection, and a compatible version of Android or iOS.

    -

    Can I order from any Domino's store using the app?

    -

    Yes, you can order from any Domino's store that offers delivery, carryout, or dine-in options using the app. You can choose your location by entering your address or finding a nearby store on the app.

    -

    How can I contact customer service if I have an issue with my order?

    -

    If you have an issue with your order, you can contact customer service by calling the store that you ordered from or using the feedback form on the app. You can find the store phone number and the feedback form under the "Help" tab on the app.

    -

    Can I cancel or modify my order after placing it?

    -

    If you want to cancel or modify your order after placing it, you need to contact the store that you ordered from as soon as possible. You can find the store phone number under the "Help" tab on the app. However, please note that some changes or cancellations may not be possible depending on the status of your order.

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Buku Tema 8 Kelas 1 Peristiwa Alam (Download PDF Gratis).md b/spaces/1phancelerku/anime-remove-background/Buku Tema 8 Kelas 1 Peristiwa Alam (Download PDF Gratis).md
deleted file mode 100644
index 557125d1774f556de2004b50f1e0038a056703ed..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Buku Tema 8 Kelas 1 Peristiwa Alam (Download PDF Gratis).md
+++ /dev/null
@@ -1,137 +0,0 @@
-<br />

    Download Buku Tema 8 Kelas 1: Peristiwa Alam

    -

    Apakah Anda sedang mencari buku tematik untuk kelas 1 SD/MI yang membahas tentang peristiwa alam? Jika ya, maka Anda berada di tempat yang tepat. Dalam artikel ini, kami akan memberikan informasi lengkap tentang buku tema 8 kelas 1 yang berjudul "Peristiwa Alam". Kami juga akan memberikan cara download buku tema 8 kelas 1 secara gratis dan legal, serta ulasan singkat tentang isi dan manfaat buku tersebut. Simak terus artikel ini sampai selesai, ya!

    -

    Apa itu Buku Tema 8 Kelas 1: Peristiwa Alam?

    -

    Pengertian Buku Tema 8 Kelas 1: Peristiwa Alam

    -

    Buku tema 8 kelas 1 adalah buku siswa yang dipersiapkan oleh Pemerintah dalam rangka implementasi Kurikulum 2013. Buku ini disusun dan ditelaah oleh berbagai pihak di bawah koordinasi Kementerian Pendidikan dan Kebudayaan, dan dipergunakan dalam tahap awal penerapan Kurikulum 2013. Buku ini merupakan "dokumen hidup" yang senantiasa diperbaiki, diperbarui, dan dimutakhirkan sesuai dengan dinamika kebutuhan dan perubahan zaman.

    -

    download buku tema 8 kelas 1


    DOWNLOAD ★★★ https://jinyurl.com/2uNKAm



    -

    Buku tema 8 kelas 1 ini memiliki judul "Peristiwa Alam" yang menggambarkan tentang berbagai fenomena alam yang terjadi di sekitar kita, seperti siang dan malam, kemarau, penghujan, dan bencana alam. Buku ini bertujuan untuk mengembangkan kompetensi siswa dalam memahami peristiwa alam, mengenali dampaknya bagi kehidupan manusia dan lingkungan, serta bersikap peduli dan bertanggung jawab terhadap pelestarian alam.

    -

    Tujuan Buku Tema 8 Kelas 1: Peristiwa Alam

    -

    Berdasarkan pengantar buku tema 8 kelas 1 yang ditulis oleh Kementerian Pendidikan dan Kebudayaan, tujuan buku ini adalah sebagai berikut:

    - -

    Bagaimana Cara Download Buku Tema 8 Kelas 1: Peristiwa Alam?

    -

    Syarat dan Ketentuan Download Buku Tema 8 Kelas 1: Peristiwa Alam

    -

    Buku tema 8 kelas 1 ini dapat diunduh secara gratis dan legal melalui situs resmi Kementerian Pendidikan dan Kebudayaan. Namun, ada beberapa syarat dan ketentuan yang harus dipenuhi oleh pengguna, yaitu:

    -

    Download Buku Tematik PDF Kelas 1 Tema 8: Peristiwa Alam
    -Download Gratis Buku Tema 8 - Peristiwa Alam Kelas 1
    -Buku Guru Kelas 1. Tema 8. Peristiwa Alam.pdf - Google Drive
    -Download Buku Siswa Tematik Kelas 1 Tema 8: Peristiwa Alam Revisi Terbaru
    -Download Buku Guru Tematik Kelas 1 Tema 8: Peristiwa Alam Revisi Terbaru
    -Buku Tematik Terpadu Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Buku Sekolah Elektronik Kelas 1 SD/MI Tema 8: Peristiwa Alam
    -Download Buku Kurikulum 2013 Kelas 1 SD/MI Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Islam dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Kristen dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Katolik dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Hindu dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Buddha dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Pendidikan Agama Konghucu dan Budi Pekerti Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Seni Budaya dan Prakarya Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Penjasorkes Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Bahasa Indonesia Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku Matematika Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku IPA Kelas 1 Tema 8: Peristiwa Alam
    -Download Buku IPS Kelas 1 Tema 8: Peristiwa Alam
    -Download RPP Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Silabus Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download KI KD Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download LKPD Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Lembar Kerja Siswa Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Soal Ulangan Harian Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Soal UTS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Soal UAS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Soal PAS Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Soal PAT Kurikulum 2013 Kelas 1 Tema 8: Peristiwa Alam
    -Download Kunci Jawaban Soal Kurikulum 2013 Kelas I Tema VIII : Peristiwa Alam
    -Contoh Pembelajaran Daring Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Media Pembelajaran Interaktif Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Video Pembelajaran Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Laporan Hasil Belajar Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Portofolio Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Jurnal Refleksi Siswa Kurikulum I013 Kls I Tma VIII : Prstw Alm
    -Contoh Jurnal Refleksi Guru Kurikulum I013 Kls I Tma VIII : Prstw Alm

    - -

    Langkah-langkah Download Buku Tema 8 Kelas 1: Peristiwa Alam

    -

    Berikut adalah langkah-langkah yang dapat diikuti untuk mengunduh buku tema 8 kelas 1 secara mudah dan cepat:

    -
      -
    1. Buka situs resmi Kementerian Pendidikan dan Kebudayaan di alamat https://www.kemdikbud.go.id/.
    2. -
    3. Pilih menu "Buku" di bagian atas halaman.
    4. -
    5. Pilih sub-menu "Buku Sekolah Elektronik (BSE)" di bagian kiri halaman.
    6. -
    7. Pilih kategori "Buku Siswa" di bagian tengah halaman.
    8. -
    9. Pilih jenjang "SD/MI" di bagian bawah halaman.
    10. -
    11. Pilih mata pelajaran "Tema" di bagian kanan halaman.
    12. -
    13. Pilih buku "Tema 8 - Peristiwa Alam" di bagian bawah halaman.
    14. -
    15. Klik tombol "Unduh Buku" di bagian kanan halaman.
    16. -
    17. Tunggu proses pengunduhan selesai dan simpan file buku di perangkat Anda.
    18. -
    -

    Apa Saja Isi Buku Tema 8 Kelas 1: Peristiwa Alam?

    -

    Subtema 1: Peristiwa Siang dan Malam

    -

    Subtema pertama ini membahas tentang peristiwa siang dan malam yang terjadi akibat rotasi bumi. Siswa akan belajar tentang konsep waktu, arah mata angin, zona waktu, serta aktivitas manusia dan hewan yang berbeda pada siang dan malam hari. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat jam matahari, menentukan arah mata angin dengan kompas, dan menggambar pemandangan siang dan malam.

    -

    Subtema 2: Kemarau

    -

    Subtema kedua ini membahas tentang peristiwa kemarau yang terjadi akibat revolusi bumi. Siswa akan belajar tentang musim kemarau di Indonesia, ciri-ciri cuaca kemarau, dampak kemarau bagi manusia dan lingkungan, serta cara mengatasi dan mencegah kemarau. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat grafik curah hujan, menulis puisi tentang kemarau, dan membuat poster hemat air.

    -

    Subtema 3: Penghujan

    -

    Subtema ketiga ini membahas tentang peristiwa pengh

    Subtema ketiga ini membahas tentang peristiwa penghujan yang terjadi akibat revolusi bumi. Siswa akan belajar tentang musim penghujan di Indonesia, ciri-ciri cuaca penghujan, dampak penghujan bagi manusia dan lingkungan, serta cara mengatasi dan mencegah banjir. Siswa juga akan melakukan beberapa kegiatan menarik, seperti membuat grafik curah hujan, menulis puisi tentang penghujan, dan membuat poster siaga banjir.

    -

Subtheme 4: Natural Disasters (Bencana Alam)

    -

This fourth subtheme discusses natural disasters, which occur as a result of extreme changes in nature. Students learn about the types of natural disasters, their causes and impacts, and how to face and reduce disaster risks. Students also carry out several engaging activities, such as mapping the distribution of natural disasters, writing stories about experiences of facing a disaster, and running a disaster evacuation drill.

    -

What Are the Benefits of the Grade 1 Theme 8 Book: Peristiwa Alam?

    -

Benefits for Students

    -

This grade 1 Theme 8 book has many benefits for students, including:

    - -

Benefits for Teachers

    -

This grade 1 Theme 8 book also has many benefits for teachers, including:

    - -

Conclusion

    -

The grade 1 Theme 8 book is a thematic textbook that discusses the natural events happening around us. It can be downloaded free of charge and legally through the official website of the Ministry of Education and Culture. The book has four subthemes: day and night, the dry season, the rainy season, and natural disasters. It also offers many benefits for students and teachers in improving the knowledge, skills, attitudes, and character values related to natural events. We hope this article is useful for anyone who wants to download the grade 1 Theme 8 book. Happy studying!

    -

    FAQ

    -

Here are some frequently asked questions about the grade 1 Theme 8 book:

<ol>-
What other learning resources can be used to support teaching Theme 8 in grade 1?
  You can use other learning resources relevant to Theme 8 for grade 1, such as reference books, audiovisual media, the internet, the surrounding environment, or expert resource persons.
-
What assessment activities can be used to measure students' competency achievement in Theme 8 for grade 1?
  You can carry out assessment activities that match the competency-achievement indicators given in each subtheme. Examples of assessment activities include written tests, oral tests, practical tests, observation, assignments, portfolios, projects, or performance tasks.
-
How can you give students effective feedback after an assessment in Theme 8 for grade 1?
  You can give students effective feedback in the following ways:
- -
What challenges might teachers and students face in teaching and learning Theme 8 for grade 1?
  Some of the challenges that teachers and students might face in Theme 8 for grade 1 are as follows:
- -
How can these challenges be overcome?
  Some of the ways to overcome these challenges are as follows:
- -

    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Gorebox Animosity 10.0.3 APK and Enter the Chaotic World of Gorebox a Game of Extreme Violence.md b/spaces/1phancelerku/anime-remove-background/Download Gorebox Animosity 10.0.3 APK and Enter the Chaotic World of Gorebox a Game of Extreme Violence.md
deleted file mode 100644
index 312c5401ef4b453294898a36b6bdeb8cb291ab30..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Gorebox Animosity 10.0.3 APK and Enter the Chaotic World of Gorebox a Game of Extreme Violence.md
+++ /dev/null
@@ -1,115 +0,0 @@
-

    Gorebox Animosity 10.0.3 APK: A Sandbox Game of Extreme Violence

    -

    Do you love games that let you unleash your inner demon and cause mayhem and destruction? If so, you might want to check out Gorebox Animosity 10.0.3 APK, a physics-based sandbox game of extreme violence. In this game, you can use a vast arsenal of brutal weapons, explosive devices, interactive ragdolls, fearsome enemies, advanced turrets, vehicles, and a cutting-edge blood and dismemberment system to create your own scenarios of carnage and chaos.

    -

    What is Gorebox Animosity?

    -

    Gorebox Animosity is a sandbox game that gives you complete freedom to do whatever you want in a chaotic world. You can choose from different maps, game modes, weapons, devices, ragdolls, enemies, turrets, and vehicles to create your own scenarios of gore and violence.

    -

    gorebox animosity 10.0.3 apk


DOWNLOAD: https://jinyurl.com/2uNKAl



    -

    A physics-based sandbox game

    -

    Gorebox Animosity uses realistic physics to simulate the effects of your actions on the environment and the ragdolls. You can see how the ragdolls react to different forces, impacts, explosions, cuts, burns, electrocutions, and more. You can also manipulate the ragdolls with your fingers or use tools like ropes, hooks, magnets, springs, balloons, etc.

    -

    A vast arsenal of brutal weapons and devices

    -

    Gorebox Animosity offers you a wide range of weapons and devices to inflict pain and damage on the ragdolls and enemies. You can use guns, knives, axes, hammers, chainsaws, grenades, rockets, mines, bombs, nukes, lasers, flamethrowers, plasma cannons, tesla coils, etc.

    -

    Interactive ragdolls, enemies, turrets, and vehicles

    -

    Gorebox Animosity features interactive ragdolls that you can customize with different skins, clothes, accessories, hairstyles, etc. You can also spawn different types of enemies like zombies, mutants, soldiers, robots, aliens, etc. to fight against or with them. You can also use turrets like machine guns, shotguns, snipers, flak cannons, etc. to defend yourself or attack others. You can also drive vehicles like cars, trucks, tanks, helicopters, jets, etc. to explore the maps or cause more destruction.

    -

    What's new in Gorebox Animosity 10.0.3 APK?

    -

    Gorebox Animosity 10.0.3 APK is the latest version of the game that was released on August 27th, 2022. This version includes a big patch with bug fixes and improvements, as well as some new features like custom names and skins for players and ragdolls, more synchronizations and animations for multiplayer mode, and some minor changes in the user interface and the game mechanics. Here are some of the highlights of the new version:

    A big patch with bug fixes and improvements

    -

    Gorebox Animosity 10.0.3 APK fixes some of the bugs and glitches that were reported by the players, such as crashes, freezes, lag, errors, etc. It also improves the performance, stability, and compatibility of the game with different devices and platforms. It also optimizes the graphics, sounds, and controls of the game for a better gaming experience.

    -

    Custom names and skins for players and ragdolls

    -

    Gorebox Animosity 10.0.3 APK allows you to customize your name and skin in the game. You can choose from different colors, patterns, textures, stickers, etc. to create your own unique look. You can also change the name and skin of the ragdolls that you spawn or interact with. You can save your customizations and use them in different game modes and maps.

    -

    More synchronizations and animations for multiplayer mode

    -

    Gorebox Animosity 10.0.3 APK enhances the multiplayer mode of the game by adding more synchronizations and animations for the players and the ragdolls. You can see how other players move, interact, shoot, explode, etc. in real-time. You can also see how the ragdolls react to different actions and events in the game. You can also chat with other players using text or voice messages.

    -

    How to download and install Gorebox Animosity 10.0.3 APK?

    -

    If you want to download and install Gorebox Animosity 10.0.3 APK on your device, you need to follow these simple steps:

    -

    gorebox animosity update 10.0.3 download
    -gorebox animosity 10.0.3 apk mediafire
    -gorebox animosity multiplayer gameplay
    -gorebox animosity cheat code
    -gorebox animosity discord server
    -gorebox animosity trailer event
    -gorebox animosity cinematic 3
    -gorebox animosity full soundtrack
    -gorebox animosity scariest moments
    -gorebox animosity how to open the door
    -gorebox animosity custom names and skins
    -gorebox animosity more synchronizations
    -gorebox animosity fixed animations
    -gorebox animosity big patch
    -gorebox animosity net energy gain
    -gorebox animosity physics-based sandbox game
    -gorebox animosity brutal weapons
    -gorebox animosity explosive devices
    -gorebox animosity interactive ragdolls
    -gorebox animosity fearsome enemies
    -gorebox animosity advanced turrets
    -gorebox animosity vehicles
    -gorebox animosity blood and dismemberment system
    -gorebox animosity flight with plasma field
    -gorebox animosity challenge from Suddy
    -gorebox animosity mega collab
    -gorebox animosity best doctor in the world
    -gorebox animosity 8.3.2 update
    -gorebox animosity 8.5.4 update
    -gorebox animosity 9.0.0 update
    -gorebox animosity 10.0.2 update
    -gorebox apk free download apkcombo
    -download gorebox android game apkcombo
    -enter the chaotic world of gorebox apkcombo
    -unleash your inner demon with gorebox apkcombo
    -how to install gorebox apk on android apkcombo
    -what's new in gorebox apk version 10.0.3 apkcombo
    -how to play gorebox online with friends apkcombo
    -how to create custom maps in gorebox apkcombo
    -how to mod gorebox with new weapons and skins apkcombo

    -

    Download the APK file from a trusted source

    -

    The first step is to download the APK file of Gorebox Animosity 10.0.3 from a trusted source. You can use this link to download the file directly from Google Drive. The file size is about 100 MB, so make sure you have enough space on your device.

    -

    Enable unknown sources on your device settings

    -

    The second step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the official app store. To do this, go to your device settings, then security or privacy, then unknown sources or install unknown apps, then toggle on the option or allow from this source.

    -

    Install the APK file and launch the game

    -

    The third step is to install the APK file and launch the game. To do this, locate the downloaded file on your device storage, then tap on it to start the installation process. Follow the instructions on the screen to complete the installation. Once done, you can launch the game from your app drawer or home screen.

    -

    How to play Gorebox Animosity 10.0.3 APK?

    -

    If you want to play Gorebox Animosity 10.0.3 APK on your device, you need to follow these simple steps:

    -

    Choose a map and a game mode

    -

    The first step is to choose a map and a game mode that you want to play. You can choose from different maps like city, desert, island, forest, etc., each with its own features and challenges. You can also choose from different game modes like sandbox, survival, deathmatch, capture the flag, etc., each with its own rules and objectives.

    -

    Use the joystick and buttons to move and interact

    -

    The second step is to use the joystick and buttons to move and interact in the game. You can use the joystick on the left side of the screen to move around and rotate your camera angle. You can use the buttons on the right side of the screen to jump, crouch, shoot, reload, switch weapons, interact with objects, etc.

    -

    Explore the chaotic world and unleash your inner demon

    -

    The third step is to explore the chaotic world and unleash your inner demon in the game. You can use your weapons and devices to inflict pain and damage on the ragdolls and enemies that you encounter or spawn in the game. You can also use your fingers or tools to manipulate them in different ways. You can also drive vehicles, use turrets, set traps, etc. to cause more mayhem and destruction. You can also play with other players online or offline in multiplayer mode.

    -

    Why should you play Gorebox Animosity 10.0.3 APK?

    -

    If you are still wondering why you should play Gorebox Animosity 10.0.3 APK on your device, here are some of the reasons why you should give it a try:

    A fun and addictive game for fans of gore and violence

    -

    Gorebox Animosity 10.0.3 APK is a game that will appeal to fans of gore and violence. If you enjoy games that let you cause mayhem and destruction, you will love this game. You can have fun and relax by playing this game and releasing your stress and anger. You can also challenge yourself by trying different game modes and maps.

    -

    A cutting-edge blood and dismemberment system

    -

    Gorebox Animosity 10.0.3 APK features a cutting-edge blood and dismemberment system that makes the game more realistic and immersive. You can see how the blood splatters, drips, stains, and pools on the ground and the objects. You can also see how the ragdolls and enemies get cut, torn, ripped, smashed, burned, etc. in different ways. You can also customize the amount and color of the blood in the game settings.

    -

    A creative and customizable game with endless possibilities

    -

    Gorebox Animosity 10.0.3 APK is a game that gives you complete freedom to create your own scenarios of gore and violence. You can customize your name, skin, weapons, devices, ragdolls, enemies, turrets, vehicles, etc. in the game. You can also use your imagination and creativity to make your own maps, game modes, stories, etc. in the game. You can also share your creations with other players online or offline.

    -

    Conclusion

    -

    Gorebox Animosity 10.0.3 APK is a physics-based sandbox game of extreme violence that lets you unleash your inner demon and cause mayhem and destruction in a chaotic world. You can use a vast arsenal of brutal weapons, explosive devices, interactive ragdolls, fearsome enemies, advanced turrets, vehicles, and a cutting-edge blood and dismemberment system to create your own scenarios of carnage and chaos. You can also customize your name, skin, weapons, devices, ragdolls, enemies, turrets, vehicles, etc. in the game. You can also play with other players online or offline in multiplayer mode. You can download and install Gorebox Animosity 10.0.3 APK on your device by following the simple steps mentioned above. If you are a fan of gore and violence, you should definitely try this game.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Gorebox Animosity 10.0.3 APK:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
Question | Answer
Is Gorebox Animosity 10.0.3 APK free to play? | Yes, Gorebox Animosity 10.0.3 APK is free to play. You can download and install it on your device without paying anything. However, the game may contain ads or in-app purchases that you can choose to buy or not.
Is Gorebox Animosity 10.0.3 APK safe to play? | Yes, Gorebox Animosity 10.0.3 APK is safe to play. The game does not contain any viruses or malware that can harm your device or data. However, the game is rated for mature audiences only due to its graphic content of gore and violence. You should play it at your own discretion and responsibility.
Is Gorebox Animosity 10.0.3 APK compatible with my device? | Gorebox Animosity 10.0.3 APK is compatible with most devices that run on Android 4.4 or higher. However, the game may not work properly on some devices due to different specifications or settings. You should check the compatibility of your device before downloading and installing the game.
How can I contact the developer of Gorebox Animosity 10.0.3 APK? | You can contact the developer of Gorebox Animosity 10.0.3 APK by sending an email to goreboxgame@gmail.com. You can also follow their social media accounts on Facebook, Twitter, Instagram, YouTube, etc. to get updates, news, tips, etc. about the game.
How can I support the development of Gorebox Animosity 10.0.3 APK? | You can support the development of Gorebox Animosity 10.0.3 APK by rating and reviewing the game on the app store or Google Play Store. You can also share the game with your friends and family who might enjoy it. You can also buy some in-app purchases or donate some money to the developer to support their work and show your appreciation. You can also join their Patreon page or Discord server to get exclusive rewards, access, and feedback.
    -

    I hope you enjoyed reading this article and learned something new about Gorebox Animosity 10.0.3 APK. If you have any questions, comments, or suggestions, feel free to leave them below. Thank you for your time and attention.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download RPG Isekai Rondo MOD APK for Android - Enjoy the Ultimate Job in a Parallel World.md b/spaces/1phancelerku/anime-remove-background/Download RPG Isekai Rondo MOD APK for Android - Enjoy the Ultimate Job in a Parallel World.md deleted file mode 100644 index d5bc9e8375d2bcd7a1b8609eb86482803191e14d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download RPG Isekai Rondo MOD APK for Android - Enjoy the Ultimate Job in a Parallel World.md +++ /dev/null @@ -1,86 +0,0 @@ -
    -

    How to Download RPG Isekai Rondo Mod Apk and Enjoy a Thrilling Adventure in a Parallel World

    -

    Do you love playing role-playing games that take you to a different world where you can become a hero and save the day? If yes, then you should try RPG Isekai Rondo, a pixel-art RPG that gives you a chance to experience a parallel universe with exciting turn-based battles and retro-style graphics. In this article, we will tell you what RPG Isekai Rondo is, how to download and install its mod apk version on your Android device, and how to play it with some useful tips and tricks. Let's get started!

    -

    download rpg isekai rondo mod apk


    Download Ziphttps://jinyurl.com/2uNO6U



    -

    What is RPG Isekai Rondo?

    -

    RPG Isekai Rondo is a game developed by KEMCO, a Japanese company that specializes in creating RPGs for mobile platforms. The game was released in February 2023 for Android and iOS devices. The game belongs to the isekai genre, which typically involves a person being transported to a parallel world, usually with fantasy elements.

    -

    The story and the gameplay of RPG Isekai Rondo

    -

    The game tells the story of Sho, an ordinary man who works at an exploitative company in modern Japan. One day, he gets into an accident and finds himself reincarnated in a parallel world as Shaw, a sage with ultra rare passive skills. He meets Viola, a hero who is on a mission to defeat the overlord who threatens the world. Together, they embark on a thrilling adventure that will test their skills and courage.

    -

    The gameplay of RPG Isekai Rondo is based on turn-based battles, where you can use various skills and items to defeat your enemies. You can also summon spirits, manage mana plants, and acquire monster skills to enhance your abilities. You can also explore different locations, complete quests, raise your guild rank, challenge other parties, participate in arenas, and loot dungeons for treasure chests.

    -

    The features and the benefits of RPG Isekai Rondo mod apk

    -

    If you want to enjoy RPG Isekai Rondo with more ease and fun, you should download its mod apk version, which offers several advantages over the original version. Some of the features and benefits of RPG Isekai Rondo mod apk are:

    -


- Unlimited currency, so you can buy whatever you need without grinding.
- All characters and their skills unlocked from the start.
- A mod menu that lets you customize various game settings.

    With these features and benefits, you can enjoy RPG Isekai Rondo mod apk with more freedom and fun. You can also save your time and money by getting everything you need for free.

    -

    How to Download and Install RPG Isekai Rondo Mod Apk on Your Android Device

    -

    Now that you know what RPG Isekai Rondo mod apk is and what it offers, you may be wondering how to download and install it on your Android device. Don't worry, we will guide you through the process step by step. Just follow these simple instructions:

    -

    Step 1: Enable unknown sources on your device

    -

    Before you can install RPG Isekai Rondo mod apk on your device, you need to enable unknown sources, which will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on. You may see a warning message, but just ignore it and confirm your choice.

    -

    Step 2: Download the RPG Isekai Rondo mod apk file from a trusted source

    -

    Next, you need to download the RPG Isekai Rondo mod apk file from a trusted source. There are many websites that claim to offer the mod apk file, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful and choose a reputable source that has positive reviews and feedback from other users. One such source is [RPG Isekai Rondo Mod Apk Download], which provides the latest version of the mod apk file with all the features and benefits mentioned above.

    -

    To download the RPG Isekai Rondo mod apk file from this source, just click on the link above and you will be redirected to the download page. There, you will see a button that says "Download Now". Click on it and wait for the download to start. The file size is about 100 MB, so it may take a few minutes depending on your internet speed.
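As a rough extra safety check on any downloaded file, a short Python sketch like the one below can compare the file's SHA-256 hash against a checksum published by the source, if one is available. The file name and expected hash here are hypothetical placeholders, not values from the actual download page.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 8192) -> str:
    """Compute the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical values: substitute the real file path and the checksum
# published by the download page, if it provides one.
apk_path = "rpg_isekai_rondo_mod.apk"
expected_hash = "replace-with-published-checksum"

if sha256_of(apk_path) == expected_hash:
    print("Checksum matches the published value.")
else:
    print("Checksum mismatch: do not install this file.")
```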

    -

    Step 3: Locate and install the RPG Isekai Rondo mod apk file on your device

    -

    Once the download is complete, you need to locate and install the RPG Isekai Rondo mod apk file on your device. To do this, go to your device's file manager and find the folder where you saved the downloaded file. It may be in your downloads folder or in any other folder that you specified. Tap on the file and you will see a pop-up window that asks you to install the app. Tap on "Install" and wait for the installation to finish.

    -

    Step 4: Launch the game and enjoy the mod features

    -

Congratulations! You have successfully installed RPG Isekai Rondo mod apk on your device. Now, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a mod menu icon in the top right corner of the screen. Tap on it and you will be able to access various options to customize your game settings. You can also check your currency, characters, skills, etc., and see that they are all unlimited and unlocked. Enjoy playing RPG Isekai Rondo with all the mod features!

    -

    How to Play RPG Isekai Rondo with Tips and Tricks

    -

    RPG Isekai Rondo is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and complex at times, especially if you are new to the game or want to advance faster. That's why we have prepared some tips and tricks that will help you play RPG Isekai Rondo better and smarter. Here they are:

    -

    Tip 1: Choose your character and element wisely

    -

    In RPG Isekai Rondo, you can choose from four different characters: Shaw (sage), Viola (hero), Lila (priestess), or Leon (knight). Each character has a different element: fire, water, wind, or earth. Each element has its own advantages and disadvantages against other elements. For example, fire is strong against wind but weak against water. You should choose your character and element based on your preference and strategy.

    -

    Tip 2: Understand the effects and passives of your skills

    -

Each character has four skills that they can use in battles: one normal skill, one special skill, one ultimate skill, and one passive skill. You should read each skill's description carefully and understand its effects and passives, so you can use it at the right moment in battle.

-

Tip 3: Observe your enemies and exploit their elemental weaknesses

-

Before attacking, check the element of each enemy and pick skills that counter it. For example, if you are facing an earth enemy, you should avoid water skills and use wind skills instead. You can also use skills that lower the enemy's resistance level or increase your own element power.

    -

    Tip 4: Earn extra points by joining guild quests and participating in daily tasks

    -

One way to earn more points and rewards in RPG Isekai Rondo is to join guild quests and participate in daily tasks. Guild quests are missions that you can complete with other players in your guild. They can include defeating a certain number of enemies, collecting a certain number of items, or clearing a certain stage. By completing guild quests, you can earn guild points, which you can use to exchange for various items and features in the guild shop. You can also increase your guild rank and reputation by completing guild quests.

    -

    Daily tasks are tasks that you can complete every day to earn extra points and rewards. They can include logging in, playing for a certain amount of time, using a certain skill, etc. By completing daily tasks, you can earn daily points, which you can use to spin the daily roulette and get various prizes, such as gold, magistones, items, etc. You can also get bonus rewards by completing all the daily tasks.

    -

    Tip 5: Explore the battle arena and the dungeon for more rewards and challenges

    -

If you want to test your skills and challenge yourself in RPG Isekai Rondo, you should explore the battle arena and the dungeon. The battle arena is a place where you can fight against other players in real-time battles. You can choose from three modes: ranked, casual, or friendly. By winning battles, you can earn arena points, which you can use to exchange for various items and features in the arena shop, and you can also increase your arena rank and reputation as you win more matches.

    -

    The dungeon is a place where you can explore different floors and rooms filled with enemies, traps, and treasures. You can choose from three difficulties: easy, normal, or hard. By clearing floors and rooms, you can earn dungeon points, which you can use to exchange for various items and features in the dungeon shop. You can also find rare items and monsters in the dungeon.

    -

    Conclusion

    -

    RPG Isekai Rondo is a game that will give you a lot of fun and excitement as you travel to a parallel world and become a powerful sage. You can enjoy the game even more by downloading its mod apk version, which gives you access to unlimited currency, characters, skills, etc. You can also play the game better and smarter by following our tips and tricks on how to choose your character and element, understand your skills, observe your enemies, earn extra points, and explore the battle arena and the dungeon. We hope that this article has helped you learn how to download RPG Isekai Rondo mod apk and enjoy a thrilling adventure in a parallel world.

    -

    FAQs

    -

    Here are some frequently asked questions about RPG Isekai Rondo mod apk:

    -

    Q: Is RPG Isekai Rondo mod apk safe to download and install?

    -

    A: Yes, RPG Isekai Rondo mod apk is safe to download and install as long as you get it from a trusted source like [RPG Isekai Rondo Mod Apk Download]. However, you should always be careful when downloading any mod apk file from unknown sources as they may contain harmful viruses or malware.

    -

    Q: Do I need to root my device to use RPG Isekai Rondo mod apk?

    -

    A: No, you do not need to root your device to use RPG Isekai Rondo mod apk. The mod apk file will work on any Android device without requiring any root access or permission.

    -

    Q: Will RPG Isekai Rondo mod apk affect my original game data?

    -

    A: No, RPG Isekai Rondo mod apk will not affect your original game data as it will create a separate folder for its data on your device. You can still play the original version of the game without any problem.

    -

    Q: How do I update RPG Isekai Rondo mod apk?

    -

    A: To update RPG Isekai Rondo mod apk, you need to download the latest version of the mod apk file from [RPG Isekai Rondo Mod Apk Download] and install it on your device. You do not need to uninstall the previous version of the mod apk file as it will overwrite it automatically.

    -

    Q: How do I contact the developer of RPG Isekai Rondo mod apk?

    -

A: If you have any questions or feedback about RPG Isekai Rondo mod apk, you can contact the developer of the mod apk file by visiting their website [RPG Isekai Rondo Mod Apk Download]. You can also follow them on their social media accounts [RPG Isekai Rondo Mod Apk Facebook] and [RPG Isekai Rondo Mod Apk Twitter]. They are very responsive and friendly and will answer your queries as soon as possible.

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Red Ball 4 Mod APK and Play 100 Square Stages with PremiumUnlocked Benefits.md b/spaces/1phancelerku/anime-remove-background/Download Red Ball 4 Mod APK and Play 100 Square Stages with PremiumUnlocked Benefits.md deleted file mode 100644 index 22c75e259709d2129fffc900f7c70b84d250ff01..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Red Ball 4 Mod APK and Play 100 Square Stages with PremiumUnlocked Benefits.md +++ /dev/null @@ -1,93 +0,0 @@ - -

    Red Ball 4 Mod 100 Square APK: A Fun and Challenging Game for Android Users

    -

    If you are looking for a fun and challenging game to play on your Android device, you might want to check out Red Ball 4 Mod 100 Square APK. This is a modified version of the popular Red Ball 4 game, which has over 100 million downloads on Google Play Store. In this article, we will tell you what Red Ball 4 is, what the mod 100 square apk is, why you should download it, and how to do it. We will also highlight some of the features of this amazing game that will keep you entertained for hours.

    -

    red ball 4 mod 100 square apk


Download: https://jinyurl.com/2uNT9B



    -

    Introduction

    -

    What is Red Ball 4?

    -

Red Ball 4 is a physics-based platformer game developed by FDG Entertainment GmbH & Co.KG. The game follows the adventures of a red ball that has to save the world from the evil black squares that want to turn everything into cubes. The game has four episodes, each with 15 levels of increasing difficulty. The game also has a level editor where you can create your own levels and share them with other players.

    -

    What is the mod 100 square apk?

    -

    The mod 100 square apk is a modified version of Red Ball 4 that gives you some extra features and advantages. The mod allows you to have unlimited lives and coins, which means you can play as long as you want without worrying about dying or running out of money. The mod also lets you change the appearance of your ball and choose from different colors and patterns. The mod also adds some new elements to the game, such as epic boss fights, cloud service, and HID controller support.

    -

    Why should you download it?

    -

    You should download Red Ball 4 Mod 100 Square APK if you want to enjoy the game to the fullest. The mod gives you more freedom and flexibility to play the game as you like. You can customize your ball, explore more levels, and challenge yourself with harder enemies. The mod also makes the game more fun and exciting with its groovy music and stunning graphics. The mod is also easy to download and install, as we will show you in the next section.

    -


    -

    Features of Red Ball 4 Mod 100 Square APK

    -

    Unlimited lives and coins

    -

    One of the best features of Red Ball 4 Mod 100 Square APK is that it gives you unlimited lives and coins. This means that you can play as long as you want without worrying about dying or running out of money. You can use the coins to buy power-ups, such as rockets, magnets, and shields, that will help you overcome obstacles and enemies. You can also use the coins to unlock new balls with different abilities and stats.

    -

    Customizable ball appearance

    -

    Another great feature of Red Ball 4 Mod 100 Square APK is that it lets you customize your ball appearance. You can choose from different colors and patterns for your ball, such as blue, green, yellow, pink, striped, dotted, etc. You can also change the shape of your ball, such as square, triangle, star, etc. You can mix and match different options to create your own unique ball that suits your style and personality.

    -

    Epic boss battles

    -

    Red Ball 4 Mod 100 Square APK also adds some epic boss battles to the game. These are challenging and thrilling levels where you have to face the black square bosses who have different powers and abilities. You have to use your skills and strategies to defeat them and save the world. The boss battles are fun and rewarding, as they test your reflexes and creativity.

    -

    Cloud save and HID controller support

    -

    Red Ball 4 Mod 100 Square APK also supports cloud save and HID controller. This means that you can save your progress online and access it from any device. You can also play the game with a physical controller, such as a gamepad or a joystick, for a more comfortable and immersive experience. These features make the game more convenient and enjoyable for all types of players.

    -

    How to download and install Red Ball 4 Mod 100 Square APK

    -

    Step 1: Enable unknown sources on your device

    -

    Before you can download and install Red Ball 4 Mod 100 Square APK, you need to enable unknown sources on your device. This will allow you to install apps that are not from the official Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may see a warning message, but don't worry, it is safe to proceed.

    -

    Step 2: Download the apk file from a trusted source

    -

Next, you need to download the apk file of Red Ball 4 Mod 100 Square APK from a trusted source. You can find many websites that offer the apk file, but be careful, as some of them may contain viruses or malware. We recommend using this link, which is verified and safe. Once you click on the link, you will see a download button. Tap on it and wait for the download to finish.

    -

    Step 3: Install the apk file and launch the game

    -

    Finally, you need to install the apk file and launch the game. To do this, go to your file manager and locate the downloaded apk file. Tap on it and follow the instructions on the screen. The installation process may take a few seconds or minutes, depending on your device. Once the installation is done, you will see an icon of Red Ball 4 on your home screen or app drawer. Tap on it and enjoy the game!

    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, Red Ball 4 Mod 100 Square APK is a fun and challenging game for Android users who love physics-based platformers. The game has four episodes with 15 levels each, plus a level editor where you can create your own levels. The mod gives you unlimited lives and coins, customizable ball appearance, epic boss battles, cloud save and HID controller support. The mod is easy to download and install, as we have shown you in this article.

    -

    Call to action

    -

    If you are ready to have some fun and challenge yourself with Red Ball 4 Mod 100 Square APK, don't hesitate any longer. Download the mod now and start playing this amazing game. You will not regret it!

    -

    Frequently Asked Questions

    -

    Q: Is Red Ball 4 Mod 100 Square APK free?

    -

    A: Yes, Red Ball 4 Mod 100 Square APK is free to download and play. You don't need to pay anything to enjoy this game.

    -

    Q: Is Red Ball 4 Mod 100 Square APK safe?

    -

    A: Yes, Red Ball 4 Mod 100 Square APK is safe to use. It does not contain any viruses or malware that could harm your device or data.

    -

    Q: How can I share my levels with other players?

    -

    A: You can share your levels with other players by using the level editor feature in the game. You can create your own levels using various objects and obstacles, and then upload them online for others to play.

    -

    Q: How can I update Red Ball 4 Mod 100 Square APK?

    -

    A: You can update Red Ball 4 Mod 100 Square APK by downloading the latest version of the apk file from the same source that you used before. You don't need to uninstall the previous version, just install the new one over it.

    -

    Q: What are some tips and tricks for playing Red Ball 4 Mod 100 Square APK?

    -

    A: Some tips and tricks for playing Red Ball 4 Mod 100 Square APK are:

    - -

    I hope these tips and tricks will help you enjoy Red Ball 4 Mod 100 Square APK even more.

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Dynamons World Dragon MOD APK Catch Train and Evolve Your Dynamons.md b/spaces/1phancelerku/anime-remove-background/Dynamons World Dragon MOD APK Catch Train and Evolve Your Dynamons.md deleted file mode 100644 index d74c4638dfbe9ee137680c8c12b4d2063111913b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Dynamons World Dragon MOD APK Catch Train and Evolve Your Dynamons.md +++ /dev/null @@ -1,99 +0,0 @@ -
    -

    Dynamons World Dragon Mod APK: A Guide for RPG Fans

    -

    If you are a fan of role-playing games (RPGs), you might have heard of Dynamons World, a popular and exciting game that lets you catch and train your own team of Dynamons, which are cute and powerful creatures that can fight in battles. But did you know that there is a way to make the game even more fun and rewarding? That's right, we are talking about Dynamons World Dragon Mod APK, a modified version of the original game that comes with amazing features and benefits. In this article, we will tell you everything you need to know about this mod, including what it is, how to download and install it, how to play it, and some tips and tricks to help you become the best RPG battle master in the Dynamons Kingdom.

    -

    dynamons world dragon mod apk


    Download Ziphttps://jinyurl.com/2uNPMa



    -

    What is Dynamons World?

    -

Before we dive into the details of the mod, let's first review what Dynamons World is all about. Dynamons World is an addictive action-adventure game that puts you in the role of a Dynamons master. You can catch and train dozens of unique Dynamons, each with their own skills and abilities, and use them to fight against other players in online multiplayer battles. You can also explore an open world full of secrets, quests, and challenges, and fight against tough Captains and bosses to prove your skills.

    -

    A fun and addictive RPG game

    -

    Dynamons World is a game that will keep you hooked for hours, as you discover new Dynamons, level them up, evolve them, and equip them with skill cards that enhance their performance in battle. You can also customize your character with different outfits and accessories, and collect badges and trophies as you complete achievements. The game has a captivating storyline that takes you from the Dynamons Camp to the Temple Ruins, where you will face the ultimate enemy, Zenix.

    -

    A huge open world to explore

    -

    Dynamons World is not just about battles, it's also about adventure. The game has a huge open world that you can explore at your own pace, finding hidden items, secrets, and surprises along the way. You can visit different locations, such as forests, deserts, caves, islands, volcanoes, and more, each with their own unique environments and Dynamons. You can also interact with other characters, such as trainers, shopkeepers, scientists, and villagers, who will give you quests, tips, or items.

    -

    A variety of Dynamons to catch and train

    -

    One of the most appealing aspects of Dynamons World is the diversity of Dynamons that you can catch and train. There are over 50 different types of Dynamons in the game, each belonging to one of six elements: fire, water, plant, electric, dark, or dragon. Each element has its own strengths and weaknesses against other elements, so you need to choose your team wisely. Some Dynamons are more common than others, while some are very rare and hard to find. You can also evolve your Dynamons into more powerful forms when they reach a certain level.

    -

    What is Dynamons World Dragon Mod APK?

    -

Now that you have a clear idea of what Dynamons World is all about, let's talk about what makes the mod version different from the original one. Dynamons World Dragon Mod APK is a modified version of the original game that comes with extra features and benefits, such as unlimited money and gems, unlocked everything, a mod menu with free shopping, and exclusive dragon Dynamons.

- Skills can have extra effects such as buff, debuff, etc. You can also switch your Dynamon with another one from your team by using the swap icon on the bottom left corner of the screen.

- The game uses an elemental system, where each Dynamon and skill belongs to one of six elements: fire, water, plant, electric, dark, or dragon. Each element has its own strengths and weaknesses against other elements, as shown in the table below:

| Element  | Strong against  | Weak against    |
|----------|-----------------|-----------------|
| Fire     | Plant, Dark     | Water, Dragon   |
| Water    | Fire, Dragon    | Plant, Electric |
| Plant    | Water, Electric | Fire, Dark      |
| Electric | Water, Dragon   | Plant, Dark     |
| Dark     | Plant, Electric | Fire, Dragon    |
| Dragon   | Fire, Dark      | Water, Electric |

You can use this table to plan your strategy and choose the best Dynamon and skill for each situation. For example, if you are facing a water Dynamon, you should use a plant or electric Dynamon and skill to deal more damage and take less damage. A short code sketch of this lookup appears after this list.

- The game also has a stamina system, where each skill consumes a certain amount of stamina points (SP) when used. You can see the SP cost of each skill on the bottom right corner of the skill icon, and the remaining SP of your Dynamon on the top left corner of the screen. You need to manage your SP wisely and avoid running out of it during a battle. You can restore your SP by using potions or by resting at the camp.
- The game has a level system, where your Dynamons gain experience points (XP) when they win battles or complete quests. When they gain enough XP, they level up and increase their stats, such as health, attack, defense, and speed. You can see the XP and level of your Dynamon on the top left corner of the screen, along with the XP bar that shows how much XP you need to reach the next level.
- The game has an evolution system, where some Dynamons can evolve into more powerful forms when they reach a certain level. You can see if your Dynamon can evolve by tapping on it on the team menu and checking the evolution icon on the bottom right corner of the screen, which also shows what level and form your Dynamon will evolve into. When your Dynamon evolves, it changes its appearance and gains new skills and abilities.
- The game has a skill card system, where you can equip your Dynamons with skill cards that enhance their skills in battle. You can find skill cards by winning battles, completing quests, or buying them from the shop. You can equip up to four skill cards per Dynamon by tapping on it on the team menu and selecting the skill card icon on the bottom left corner of the screen, and you can see the effect of each skill card on the skill icon.
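To make the match-ups concrete, here is a minimal Python sketch of an element-effectiveness lookup. The strong/weak data is copied straight from the table above; the 2.0/0.5/1.0 damage multipliers are illustrative assumptions, not the game's actual numbers.

```python
# Element match-ups copied from the table above.
STRONG_AGAINST = {
    "fire":     {"plant", "dark"},
    "water":    {"fire", "dragon"},
    "plant":    {"water", "electric"},
    "electric": {"water", "dragon"},
    "dark":     {"plant", "electric"},
    "dragon":   {"fire", "dark"},
}

def effectiveness(attacker: str, defender: str) -> float:
    """Return an illustrative damage multiplier for attacker vs. defender."""
    if defender in STRONG_AGAINST[attacker]:
        return 2.0  # assumed bonus for a favorable match-up
    if attacker in STRONG_AGAINST[defender]:
        return 0.5  # assumed penalty for an unfavorable match-up
    return 1.0      # neutral match-up

# Per the table, plant is strong against water, and fire is weak against water.
print(effectiveness("plant", "water"))  # 2.0
print(effectiveness("fire", "water"))   # 0.5
```

The same lookup also works in reverse: given an enemy's element, you can scan STRONG_AGAINST to find which of your own Dynamons has the advantage.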

    Best dragon Dynamons to use

    -

    If you are playing with the mod version of Dynamons World, you will have access to exclusive dragon Dynamons that are not available in the original version. These dragon Dynamons are very powerful and rare, and they have special skills and abilities that make them stand out from other Dynamons. Here are some of the best dragon Dynamons to use in your team:

    -


    - -

    Online multiplayer mode

    -

    One of the most exciting features of Dynamons World is the online multiplayer mode, where you can challenge other players from around the world in real-time battles. You can use the online icon on the top left corner of the screen to access the online mode. You can choose to play in one of three modes: ranked, friendly, or tournament. In ranked mode, you can compete with other players for points and ranks. In friendly mode, you can play with your friends or random players for fun. In tournament mode, you can join or create a tournament and fight for prizes and glory. You can also chat with other players, send them gifts, or add them as friends.

    -

    Conclusion

    -

    Dynamons World is a fun and addictive RPG game that lets you catch and train your own team of Dynamons and fight against other players in online multiplayer battles. You can also explore a huge open world full of secrets, quests, and challenges, and fight against tough Captains and bosses to prove your skills. If you want to make the game even more fun and rewarding, you can try Dynamons World Dragon Mod APK, a modified version of the original game that comes with amazing features and benefits, such as unlimited money and gems, unlocked everything, mod menu and free shopping, and exclusive dragon Dynamons. You can download and install the mod easily and safely by following the steps in this article. You can also use the tips and tricks in this article to help you play the mod and become the best RPG battle master in the Dynamons Kingdom.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Dynamons World Dragon Mod APK:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/4Taps/SadTalker/src/facerender/modules/mapping.py b/spaces/4Taps/SadTalker/src/facerender/modules/mapping.py deleted file mode 100644 index 0e3a1c2d1770996080c08e9daafb346f05d7bcdd..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/facerender/modules/mapping.py +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class MappingNet(nn.Module): - def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins): - super( MappingNet, self).__init__() - - self.layer = layer - nonlinearity = nn.LeakyReLU(0.1) - - self.first = nn.Sequential( - torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True)) - - for i in range(layer): - net = nn.Sequential(nonlinearity, - torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3)) - setattr(self, 'encoder' + str(i), net) - - self.pooling = nn.AdaptiveAvgPool1d(1) - self.output_nc = descriptor_nc - - self.fc_roll = nn.Linear(descriptor_nc, num_bins) - self.fc_pitch = nn.Linear(descriptor_nc, num_bins) - self.fc_yaw = nn.Linear(descriptor_nc, num_bins) - self.fc_t = nn.Linear(descriptor_nc, 3) - self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp) - - def forward(self, input_3dmm): - out = self.first(input_3dmm) - for i in range(self.layer): - model = getattr(self, 'encoder' + str(i)) - out = model(out) + out[:,:,3:-3] - out = self.pooling(out) - out = out.view(out.shape[0], -1) - #print('out:', out.shape) - - yaw = self.fc_yaw(out) - pitch = self.fc_pitch(out) - roll = self.fc_roll(out) - t = self.fc_t(out) - exp = self.fc_exp(out) - - return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} \ No newline at end of file diff --git a/spaces/AIBoy1993/segment_anything_webui/README.md b/spaces/AIBoy1993/segment_anything_webui/README.md deleted file mode 100644 index 5cecd18708e43eede5fa841674cc707d63755e3a..0000000000000000000000000000000000000000 --- a/spaces/AIBoy1993/segment_anything_webui/README.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Segment Anything -emoji: 🚀 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.25.0 -app_file: app.py -pinned: false ---- - -# Segment Anything WebUI - -[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg)](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true) -[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true) - -This project is based on **[Segment Anything Model](https://segment-anything.com/)** by Meta. The UI is based on [Gradio](https://gradio.app/). - -- Try deme on HF: [AIBoy1993/segment_anything_webui](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui) -- [GitHub](https://github.com/5663015/segment_anything_webui) - -![](./images/20230408023615.png) - -## Change Logs - -- [2023-4-11] - - Support video segmentation. A short video can be automatically segmented by SAM. 
- - Support text prompt segmentation using [OWL-ViT](https://huggingface.co/docs/transformers/v4.27.2/en/model_doc/owlvit#overview) (Vision Transformer for Open-World Localization) model. - - -## **Usage** - -Following usage is running on your computer. - -- Install Segment Anything([more details about install Segment Anything](https://github.com/facebookresearch/segment-anything#installation)): - -``` -pip install git+https://github.com/facebookresearch/segment-anything.git -``` - -- `git clone` this repository: - -``` -git clone https://github.com/5663015/segment_anything_webui.git -``` - -- Make a new folder named `checkpoints` under this project,and put the downloaded weights files in `checkpoints`。You can download the weights using following URLs: - - - `vit_h`: [ViT-H SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth) - - - `vit_l`: [ViT-L SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth) - - - `vit_b`: [ViT-B SAM model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth) - -- Under `checkpoints`, make a new folder named `models--google--owlvit-base-patch32`, and put the downloaded [OWL-ViT weights](https://huggingface.co/google/owlvit-base-patch32) files in `models--google--owlvit-base-patch32`. -- Run: - -``` -python app.py -``` - -**Note:** Default model is `vit_b`,the demo can run on CPU. Default device is `cpu`。 - -## TODO - -- [x] Video segmentation - -- [x] Add text prompt - -- [ ] Add segmentation prompt (point and box) - -## Reference - -- Thanks to the wonderful work [Segment Anything](https://segment-anything.com/) and [OWL-ViT](https://arxiv.org/abs/2205.06230) -- Some video processing code references [kadirnar/segment-anything-video](https://github.com/kadirnar/segment-anything-video), and some OWL-ViT code references [ngthanhtin/owlvit_segment_anything](https://github.com/ngthanhtin/owlvit_segment_anything). - diff --git a/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/debug.py b/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/debug.py deleted file mode 100644 index 5612ff5688d85fede0e605b244919e8081cb1da9..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/grids/compression/debug.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid is a minimal example for debugging compression task -and how to override parameters directly in a grid. 
-Learn more about dora grids: https://github.com/facebookresearch/dora -""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=2, partition=partitions) - launcher.bind_(solver='compression/debug') - - with launcher.job_array(): - # base debug task using config from solver=compression/debug - launcher() - # we can override parameters in the grid to launch additional xps - launcher({'rvq.bins': 2048, 'rvq.n_q': 4}) diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py deleted file mode 100644 index c63b59f67aa48342179415c1d1beac68574a5498..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Parallel WaveGAN Modules.""" - -import logging -import math - -import torch -from torch import nn - -from modules.parallel_wavegan.layers import Conv1d -from modules.parallel_wavegan.layers import Conv1d1x1 -from modules.parallel_wavegan.layers import ResidualBlock -from modules.parallel_wavegan.layers import upsample -from modules.parallel_wavegan import models - - -class ParallelWaveGANGenerator(torch.nn.Module): - """Parallel WaveGAN Generator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - aux_channels=80, - aux_context_window=2, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - upsample_conditional_features=True, - upsample_net="ConvInUpsampleNetwork", - upsample_params={"upsample_scales": [4, 4, 4, 4]}, - use_pitch_embed=False, - ): - """Initialize Parallel WaveGAN Generator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - aux_channels (int): Number of channels for auxiliary feature conv. - aux_context_window (int): Context window size for auxiliary feature. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv layer. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - upsample_conditional_features (bool): Whether to use upsampling network. - upsample_net (str): Upsampling network architecture. - upsample_params (dict): Upsampling network parameters. 
- - """ - super(ParallelWaveGANGenerator, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.aux_channels = aux_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True) - - # define conv + upsampling network - if upsample_conditional_features: - upsample_params.update({ - "use_causal_conv": use_causal_conv, - }) - if upsample_net == "MelGANGenerator": - assert aux_context_window == 0 - upsample_params.update({ - "use_weight_norm": False, # not to apply twice - "use_final_nonlinear_activation": False, - }) - self.upsample_net = getattr(models, upsample_net)(**upsample_params) - else: - if upsample_net == "ConvInUpsampleNetwork": - upsample_params.update({ - "aux_channels": aux_channels, - "aux_context_window": aux_context_window, - }) - self.upsample_net = getattr(upsample, upsample_net)(**upsample_params) - else: - self.upsample_net = None - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=aux_channels, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, skip_channels, bias=True), - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - self.use_pitch_embed = use_pitch_embed - if use_pitch_embed: - self.pitch_embed = nn.Embedding(300, aux_channels, 0) - self.c_proj = nn.Linear(2 * aux_channels, aux_channels) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x, c=None, pitch=None, **kwargs): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, C_in, T). - c (Tensor): Local conditioning auxiliary features (B, C ,T'). - pitch (Tensor): Local conditioning pitch (B, T'). 
- - Returns: - Tensor: Output tensor (B, C_out, T) - - """ - # perform upsampling - if c is not None and self.upsample_net is not None: - if self.use_pitch_embed: - p = self.pitch_embed(pitch) - c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2) - c = self.upsample_net(c) - assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1)) - - # encode to hidden representation - x = self.first_conv(x) - skips = 0 - for f in self.conv_layers: - x, h = f(x, c) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - - return x - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - @staticmethod - def _get_receptive_field_size(layers, stacks, kernel_size, - dilation=lambda x: 2 ** x): - assert layers % stacks == 0 - layers_per_cycle = layers // stacks - dilations = [dilation(i % layers_per_cycle) for i in range(layers)] - return (kernel_size - 1) * sum(dilations) + 1 - - @property - def receptive_field_size(self): - """Return receptive field size.""" - return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size) - - -class ParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=10, - conv_channels=64, - dilation_factor=1, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - bias=True, - use_weight_norm=True, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Number of output channels. - layers (int): Number of conv layers. - conv_channels (int): Number of chnn layers. - dilation_factor (int): Dilation factor. For example, if dilation_factor = 2, - the dilation will be 2, 4, 8, ..., and so on. - nonlinear_activation (str): Nonlinear function after each conv. - nonlinear_activation_params (dict): Nonlinear function parameters - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool) Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - - """ - super(ParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - assert dilation_factor > 0, "Dilation factor must be > 0." 
- self.conv_layers = torch.nn.ModuleList() - conv_in_channels = in_channels - for i in range(layers - 1): - if i == 0: - dilation = 1 - else: - dilation = i if dilation_factor == 1 else dilation_factor ** i - conv_in_channels = conv_channels - padding = (kernel_size - 1) // 2 * dilation - conv_layer = [ - Conv1d(conv_in_channels, conv_channels, - kernel_size=kernel_size, padding=padding, - dilation=dilation, bias=bias), - getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params) - ] - self.conv_layers += conv_layer - padding = (kernel_size - 1) // 2 - last_conv_layer = Conv1d( - conv_in_channels, out_channels, - kernel_size=kernel_size, padding=padding, bias=bias) - self.conv_layers += [last_conv_layer] - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - for f in self.conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - -class ResidualParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - nonlinear_activation_params (dict): Nonlinear function parameters - - """ - super(ResidualParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
- - self.in_channels = in_channels - self.out_channels = out_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = torch.nn.Sequential( - Conv1d1x1(in_channels, residual_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - ) - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=-1, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, skip_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - x = self.first_conv(x) - - skips = 0 - for f in self.conv_layers: - x, h = f(x, None) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) diff --git a/spaces/Abhaykoul/Wikipedia/README.md b/spaces/Abhaykoul/Wikipedia/README.md deleted file mode 100644 index 2cca8fb7b7f0e83e48bc816eb16a1c3a408383c5..0000000000000000000000000000000000000000 --- a/spaces/Abhaykoul/Wikipedia/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Wikipedia -emoji: 👁 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.28.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Abhilashvj/planogram-compliance/app.py b/spaces/Abhilashvj/planogram-compliance/app.py deleted file mode 100644 index 82ee449b9f4cd4a50fcb4986565cdf769dcd6538..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/app.py +++ /dev/null @@ -1,296 +0,0 @@ -# https://planogram-compliance.herokuapp.com/ -# https://dashboard.heroku.com/apps/planogram-compliance/deploy/heroku-git - -# https://medium.com/@mohcufe/how-to-deploy-your-trained-pytorch-model-on-heroku-ff4b73085ddd\ -# 
https://stackoverflow.com/questions/51730880/where-do-i-get-a-cpu-only-version-of-pytorch -# https://blog.jcharistech.com/2020/02/26/how-to-deploy-a-face-detection-streamlit-app-on-heroku/ -# https://towardsdatascience.com/a-quick-tutorial-on-how-to-deploy-your-streamlit-app-to-heroku- -# https://www.analyticsvidhya.com/blog/2021/06/deploy-your-ml-dl-streamlit-application-on-heroku/ -# https://gist.github.com/jeremyjordan/6b506257509e8ba673f145baa568a1ea - -import json - -# https://www.r-bloggers.com/2020/12/creating-a-streamlit-web-app-building-with-docker-github-actions-and-hosting-on-heroku/ -# https://devcenter.heroku.com/articles/container-registry-and-runtime -# from yolo_inference_util import run_yolo_v5 -import os -from tempfile import NamedTemporaryFile - -import cv2 -import numpy as np -import pandas as pd -import streamlit as st - -# import matplotlib.pyplot as plt -from app_utils import annotate_planogram_compliance, bucket_sort, do_sorting, xml_to_csv -from inference import run - -# from utils.plots import Annotator, colors -# from utils.general import scale_coords - -app_formal_name = "Planogram Compliance" - -FILE_UPLOAD_DIR = "tmp" - -os.makedirs(FILE_UPLOAD_DIR, exist_ok=True) -# Start the app in wide-mode -st.set_page_config( - layout="wide", - page_title=app_formal_name, -) -# https://github.com/streamlit/streamlit/issues/1361 -uploaded_file = st.file_uploader( - "Choose a planogram image to score", - type=["jpg", "JPEG", "PNG", "JPG", "jpeg"], -) -uploaded_master_planogram_file = st.file_uploader( - "Upload a master planogram", type=["jpg", "JPEG", "PNG", "JPG", "jpeg"] -) -annotation_file = st.file_uploader("upload master polanogram", type=["xml"]) -temp_file = NamedTemporaryFile(delete=False) - -target_names = [ - "Bottle,100PLUS ACTIVE 1.5L", - "Bottle,100PLUS ACTIVE 500ML", - "Bottle,100PLUS LEMON LIME 1.5L", - "Bottle,100PLUS ORANGE 500ML", - "Bottle,100PLUS ORIGINAL 1.5L", - "Bottle,100PLUS TANGY ORANGE 1.5L", - "Bottle,100PLUS ZERO 1.5L", - "Bottle,100PLUS ZERO 500ML", - "Packet,F:M MAGNOLIA CHOC 1L", - "Bottle,F&N GINGER ADE 1.5L", - "Bottle,F&N GRAPE 1.5L", - "Bottle,F&N ICE CREAM SODA 1.5L", - "Bottle,F&N LYCHEE PEAR 1.5L", - "Bottle,F&N ORANGE 1.5L", - "Bottle,F&N PINEAPPLE PET 1.5L", - "Bottle,F&N SARSI 1.5L", - "Bottle,F&N SS ICE LEM TEA RS 500ML", - "Bottle,F&N SS ICE LEMON TEA RS 1.5L", - "Bottle,F&N SS ICE LEMON TEA 1.5L", - "Bottle,F&N SS ICE LEMON TEA 500ML", - "Bottle,F&N SS ICE PEACH TEA 1.5L", - "Bottle,SS ICE LEMON GT 1.48L", - "Bottle,SS WHITE CHRYS TEA 1.48L", - "Packet,FARMHOUSE FRESH MILK 1L FNDM", - "Packet,FARMHOUSE PLAIN LF 1L", - "Packet,PURA FRESH MILK 1L FS", - "Packet,NUTRISOY REG NO SUGAR ADDED 1L", - "Packet,NUTRISOY PLAIN 475ML", - "Packet,NUTRISOY PLAIN 1L", - "Packet,NUTRISOY OMEGA RD SUGAR 1L", - "Packet,NUTRISOY OMEGA NSA 1L", - "Packet,NUTRISOY ALMOND 1L", - "Packet,MAGNOLIA FRESH MILK 1L FNDM", - "Packet,FM MAG FC PLAIN 200ML", - "Packet,MAG OMEGA PLUS PLAIN 200ML", - "Packet,MAG KURMA MILK 500ML", - "Packet,MAG KURMA MILK 1L", - "Packet,MAG CHOCOLATE FC 500ML", - "Packet,MAG BROWN SUGAR SS MILK 1L", - "Packet,FM MAG LFHC PLN 500ML", - "Packet,FM MAG LFHC OAT 500ML", - "Packet,FM MAG LFHC OAT 1L", - "Packet,FM MAG FC PLAIN 500ML", - "Void,PARTIAL VOID", - "Void,FULL VOID", - "Bottle,F&N SS ICE LEM TEA 500ML", -] - -run_app = st.button("Run the compliance check") -if run_app and uploaded_file is not None: - # Convert the file to an opencv image. 
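- # (st.file_uploader returns a BytesIO-like UploadedFile; wrapping its raw
- # bytes in a uint8 array lets cv2.imdecode parse them into a BGR image.)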
- file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8) - temp_file.write(uploaded_file.getvalue()) - uploaded_img = cv2.imdecode(file_bytes, 1) - cv2.imwrite("tmp/to_score_planogram_tmp.png", uploaded_img) - - # if uploaded_master_planogram_file is None: - # master = cv2.imread('./sample_master_planogram.jpeg') - - names_dict = {name: id for id, name in enumerate(target_names)} - - sorted_xml_df = None - # https://discuss.streamlit.io/t/unable-to-read-files-using-standard-file-uploader/2258/2 - if uploaded_master_planogram_file and annotation_file: - file_bytes = np.asarray( - bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8 - ) - master = cv2.imdecode(file_bytes, 1) - cv2.imwrite("tmp/master_tmp.png", master) - # cv2.imwrite("tmp_uploaded_master_planogram_img.png", master) - # xml = annotation_file.read() - # tmp_xml ="tmp_xml_annotation.xml" - # with open(tmp_xml ,'w',encoding='utf-8') as f: - # xml = f.write(xml) - xml_df = xml_to_csv(annotation_file) - xml_df["cls"] = xml_df["cls"].map(names_dict) - sorted_xml_df = do_sorting(xml_df) - sorted_xml_df.line_number.value_counts() - - line_data = sorted_xml_df.line_number.value_counts() - n_rows = int(len(line_data)) - n_cols = int(max(line_data)) - master_table = np.zeros((n_rows, n_cols)) + 101 - master_annotations = [] - for i, row in sorted_xml_df.groupby("line_number"): - # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist()) - products = row.cls.tolist() - master_table[int(i - 1), 0 : len(products)] = products - annotations = [ - (int(k), int(v)) - for k, v in list( - zip(row.cls.unique(), row.cls.value_counts().tolist()) - ) - ] - master_annotations.append(annotations) - master_table.shape - # print("Annoatated planogram") - # print(np.matrix(master_table)) - - elif uploaded_master_planogram_file: - print( - "Finding the amster annotations with the YOLOv5 model predictions" - ) - file_bytes = np.asarray( - bytearray(uploaded_master_planogram_file.read()), dtype=np.uint8 - ) - master = cv2.imdecode(file_bytes, 1) - cv2.imwrite("tmp/master_tmp.png", master) - master_results = run( - weights="base_line_best_model_exp5.pt", - source="tmp/master_tmp.png", - imgsz=[640, 640], - conf_thres=0.6, - iou_thres=0.6, - ) - - bb_df = pd.DataFrame( - master_results[0][1].tolist(), - columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"], - ) - sorted_df = do_sorting(bb_df) - - n_rows = int(sorted_df.line_number.max()) - n_cols = int( - sorted_df.groupby("line_number") - .size() - .reset_index(name="counts")["counts"] - .max() - ) - non_null_product = 101 - print("master size", n_rows, n_cols) - master_annotations = [] - master_table = np.zeros((int(n_rows), int(n_cols))) + non_null_product - for i, row in sorted_df.groupby("line_number"): - # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist()) - products = row.cls.tolist() - col_len = min(len(products), n_cols) - print("col size: ", col_len) - print("row size: ", i - 1) - if n_rows <= (i - 1): - print("more rows than expected in the predictions") - break - master_table[int(i - 1), 0:col_len] = products[:col_len] - annotations = [ - (int(k), int(v)) - for k, v in list( - zip(row.cls.unique(), row.cls.value_counts().tolist()) - ) - ] - master_annotations.append(annotations) - else: - master = cv2.imread("./sample_master_planogram.jpeg") - n_rows = 3 - n_cols = 16 - master_table = np.zeros((n_rows, n_cols)) + 101 - master_annotations = [ - [(32, 12), (8, 4)], - [(36, 1), (41, 6), (50, 4), (51, 3), 
(52, 2)], - [(23, 5), (24, 6), (54, 5)], - ] - - for i, row in enumerate(master_annotations): - idx = 0 - for product, count in row: - master_table[i, idx : idx + count] = product - idx = idx + count - # Now do something with the image! For example, let's display it: - # st.image(opencv_image, channels="BGR") - - # uploaded_img = '/content/drive/My Drive/0.CV/0.Planogram_Compliance/planogram_data/images/test/IMG_5718.jpg' - result_list = run( - weights="base_line_best_model_exp5.pt", - source="tmp/to_score_planogram_tmp.png", - imgsz=[640, 640], - conf_thres=0.6, - iou_thres=0.6, - ) - - bb_df = pd.DataFrame( - result_list[0][1].tolist(), - columns=["xmin", "ymin", "xmax", "ymax", "conf", "cls"], - ) - sorted_df = do_sorting(bb_df) - - non_null_product = 101 - print("master size", n_rows, n_cols) - detected_table = np.zeros((n_rows, n_cols)) + non_null_product - for i, row in sorted_df.groupby("line_number"): - # print(f"Adding products in the row {i} to the detected planogram", row.cls.tolist()) - products = row.cls.tolist() - col_len = min(len(products), n_cols) - print("col size: ", col_len) - print("row size: ", i - 1) - if n_rows <= (i - 1): - print("more rows than expected in the predictions") - break - detected_table[int(i - 1), 0:col_len] = products[:col_len] - - # score = (master_table == detected_table).sum() / (master_table != non_null_product).sum() - correct_matches = ( - np.ma.masked_equal(master_table, non_null_product) == detected_table - ).sum() - total_products = (master_table != non_null_product).sum() - score = correct_matches / total_products - # if sorted_xml_df is not None: - # annotate_df = sorted_xml_df[["xmin","ymin", "xmax", "ymax", "line_number","cls"]].astype(int) - # else: - annotate_df = sorted_df[ - ["xmin", "ymin", "xmax", "ymax", "line_number", "cls"] - ].astype(int) - - mask = master_table != non_null_product - m_detected_table = np.ma.masked_array(master_table, mask=mask) - m_annotated_table = np.ma.masked_array(detected_table, mask=mask) - - # wrong_indexes = np.ravel_multi_index(master_table*mask != detected_table*mask, master_table.shape) - wrong_indexes = np.where(master_table != detected_table) - correct_indexes = np.where(master_table == detected_table) - annotated_planogram = annotate_planogram_compliance( - uploaded_img, annotate_df, correct_indexes, wrong_indexes, target_names - ) - st.title("Target Products") - st.write(json.dumps(target_names)) - st.title("The master planogram annotation") - st.write( - "The annotations are based on the index of products from Target products list " - ) - st.write(json.dumps(master_annotations)) - - # https://github.com/streamlit/streamlit/issues/888 - st.image( - [master, annotated_planogram, result_list[0][0]], - width=512, - caption=[ - "Master planogram", - "Planogram Compliance", - "Planogram Predictions", - ], - channels="BGR", - ) - # st.image([master, annotated_planogram], width=512, caption=["Master planogram", "Planogram Compliance"], channels="BGR") - st.title("Planogram Compiance score") - # st.write(f"{correct_matches} / {total_products}") - st.write(score) diff --git a/spaces/AchyuthGamer/AchyuthGamer-OpenGPT/README.md b/spaces/AchyuthGamer/AchyuthGamer-OpenGPT/README.md deleted file mode 100644 index bbdf3aa0da9d22bad85e4b1c107b4fbb1de3a52d..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/AchyuthGamer-OpenGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AchyuthGamer OpenGPT -emoji: 🐠 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.45.2 -app_file: 
app.py -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ylokh.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ylokh.py deleted file mode 100644 index 59da0fa404a4ad59c0117ae1e8f0af4d3b466fab..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ylokh.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -import json - -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider -from ..typing import AsyncResult, Messages - -class Ylokh(AsyncGeneratorProvider): - url = "https://chat.ylokh.xyz" - working = True - supports_gpt_35_turbo = True - - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - stream: bool = True, - proxy: str = None, - timeout: int = 120, - **kwargs - ) -> AsyncResult: - model = model if model else "gpt-3.5-turbo" - headers = { - "Origin" : cls.url, - "Referer": cls.url + "/", - } - data = { - "messages": messages, - "model": model, - "temperature": 1, - "presence_penalty": 0, - "top_p": 1, - "frequency_penalty": 0, - "allow_fallback": True, - "stream": stream, - **kwargs - } - async with StreamSession( - headers=headers, - proxies={"https": proxy}, - timeout=timeout - ) as session: - async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response: - response.raise_for_status() - if stream: - async for line in response.iter_lines(): - line = line.decode() - if line.startswith("data: "): - if line.startswith("data: [DONE]"): - break - line = json.loads(line[6:]) - content = line["choices"][0]["delta"].get("content") - if content: - yield content - else: - chat = await response.json() - yield chat["choices"][0]["message"].get("content") - - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ("timeout", "int"), - ("temperature", "float"), - ("top_p", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostLayout.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostLayout.js deleted file mode 100644 index 0c65af357d21f35058b8d63e466a4300365f18b2..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PostLayout.js +++ /dev/null @@ -1,7 +0,0 @@ -var PostLayout = function (parent, newWidth, newHeight) { - if (this._anchor) { - this._anchor.updatePosition(); - } - return this; -} -export default PostLayout; \ No newline at end of file diff --git a/spaces/AiMimicry/sovits-models/README.md b/spaces/AiMimicry/sovits-models/README.md deleted file mode 100644 index b2debfb030fe1e3f7564101640897f6c211675bb..0000000000000000000000000000000000000000 --- a/spaces/AiMimicry/sovits-models/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Sovits Models -emoji: 🎙️ -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: zomehwh/sovits-models ---- diff --git a/spaces/Akmyradov/TurkmenSpeechRecogntion/app.py b/spaces/Akmyradov/TurkmenSpeechRecogntion/app.py deleted file mode 
100644 index 162995e25643b03df654f8f41735a15b5f6e7804..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenSpeechRecogntion/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import gradio as gr -from transformers import Wav2Vec2ForCTC, AutoProcessor -import torch -import librosa -import json - -with open('ISO_codes.json', 'r') as file: - iso_codes = json.load(file) - -languages = list(iso_codes.keys()) - -model_id = "facebook/mms-1b-all" -processor = AutoProcessor.from_pretrained(model_id) -model = Wav2Vec2ForCTC.from_pretrained(model_id) - -def transcribe(audio_file_mic=None, audio_file_upload=None, language="Turkmen"): - if audio_file_mic: - audio_file = audio_file_mic - elif audio_file_upload: - audio_file = audio_file_upload - else: - return "Please upload an audio file or record one" - - # Make sure audio is 16kHz (the MMS/Wav2Vec2 checkpoints expect 16 kHz input) - speech, sample_rate = librosa.load(audio_file) - if sample_rate != 16000: - speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000) - - # Keep the same model in memory and simply switch out the language adapters by calling load_adapter() for the model and set_target_lang() for the tokenizer - language_code = "tuk-script_latin" - processor.tokenizer.set_target_lang(language_code) - model.load_adapter(language_code) - - inputs = processor(speech, sampling_rate=16_000, return_tensors="pt") - - with torch.no_grad(): - outputs = model(**inputs).logits - - ids = torch.argmax(outputs, dim=-1)[0] - transcription = processor.decode(ids) - return transcription - - - -iface = gr.Interface(fn=transcribe, - inputs=[ - gr.Audio(source="microphone", type="filepath", label="Sesiňi ýazdyr"), - gr.Audio(source="upload", type="filepath", label="Sesiňi ýükle"), - gr.Dropdown(choices=languages, label="Language", value="Türkmen") - ], - outputs=gr.Textbox(label="Transcription") - ) -iface.launch() \ No newline at end of file diff --git a/spaces/AlanMars/QYL-AI-Space/run_macOS.command b/spaces/AlanMars/QYL-AI-Space/run_macOS.command deleted file mode 100644 index 2d26597ae47519f42336ccffc16646713a192ae1..0000000000000000000000000000000000000000 --- a/spaces/AlanMars/QYL-AI-Space/run_macOS.command +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Get the directory the script lives in -script_dir=$(dirname "$(readlink -f "$0")") - -# Change the working directory to the script's directory -cd "$script_dir" || exit - -# Check whether the Git repository has updates -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # If there are updates, shut down the currently running server - pkill -f ChuanhuChatbot.py - - # Pull the latest changes - git pull - - # Install dependencies - pip3 install -r requirements.txt - - # Restart the server - nohup python3 ChuanhuChatbot.py & -fi - -# Check whether ChuanhuChatbot.py is running -if ! 
pgrep -f ChuanhuChatbot.py > /dev/null; then - # If it is not running, start the server - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/__init__.py b/spaces/AlexWang/lama/saicinpainting/evaluation/losses/fid/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Aloento/9Nine-VITS/load_checkpoint.py b/spaces/Aloento/9Nine-VITS/load_checkpoint.py deleted file mode 100644 index 04f9bd176a1828999208ad04747277c1937736a8..0000000000000000000000000000000000000000 --- a/spaces/Aloento/9Nine-VITS/load_checkpoint.py +++ /dev/null @@ -1,32 +0,0 @@ -import logging -import os - -import torch - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except KeyError: # parameter missing from the checkpoint: keep current weights - logging.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logging.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/helpers.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/helpers.py deleted file mode 100644 index c4a58b34ea5ca6912fe53c63dede0a8696f5c024..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/helpers.py +++ /dev/null @@ -1,140 +0,0 @@ -from collections import namedtuple -import torch -import torch.nn.functional as F -from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module - -""" -ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Flatten(Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -def l2_norm(input, axis=1): - norm = torch.norm(input, 2, axis, True) - output = torch.div(input, norm) - return output - - -class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): - """ A named tuple describing a ResNet block. 
""" - - -def get_block(in_channel, depth, num_units, stride=2): - return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] - - -def get_blocks(num_layers): - if num_layers == 50: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=4), - get_block(in_channel=128, depth=256, num_units=14), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 100: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=13), - get_block(in_channel=128, depth=256, num_units=30), - get_block(in_channel=256, depth=512, num_units=3) - ] - elif num_layers == 152: - blocks = [ - get_block(in_channel=64, depth=64, num_units=3), - get_block(in_channel=64, depth=128, num_units=8), - get_block(in_channel=128, depth=256, num_units=36), - get_block(in_channel=256, depth=512, num_units=3) - ] - else: - raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers)) - return blocks - - -class SEModule(Module): - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.avg_pool = AdaptiveAvgPool2d(1) - self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) - self.relu = ReLU(inplace=True) - self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) - self.sigmoid = Sigmoid() - - def forward(self, x): - module_input = x - x = self.avg_pool(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class bottleneck_IR(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -class bottleneck_IR_SE(Module): - def __init__(self, in_channel, depth, stride): - super(bottleneck_IR_SE, self).__init__() - if in_channel == depth: - self.shortcut_layer = MaxPool2d(1, stride) - else: - self.shortcut_layer = Sequential( - Conv2d(in_channel, depth, (1, 1), stride, bias=False), - BatchNorm2d(depth) - ) - self.res_layer = Sequential( - BatchNorm2d(in_channel), - Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), - PReLU(depth), - Conv2d(depth, depth, (3, 3), stride, 1, bias=False), - BatchNorm2d(depth), - SEModule(depth, 16) - ) - - def forward(self, x): - shortcut = self.shortcut_layer(x) - res = self.res_layer(x) - return res + shortcut - - -def _upsample_add(x, y): - """Upsample and add two feature maps. - Args: - x: (Variable) top feature map to be upsampled. - y: (Variable) lateral feature map. - Returns: - (Variable) added feature map. - Note in PyTorch, when input size is odd, the upsampled feature map - with `F.upsample(..., scale_factor=2, mode='nearest')` - maybe not equal to the lateral feature map size. - e.g. - original input size: [N,_,15,15] -> - conv2d feature map size: [N,_,8,8] -> - upsampled feature map size: [N,_,16,16] - So we choose bilinear upsample which supports arbitrary output sizes. 
- """ - _, _, H, W = y.size() - return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/index.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/index.md deleted file mode 100644 index e1a2a3971d87ce823e4668662d65c2b55602b87f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/index.md +++ /dev/null @@ -1,101 +0,0 @@ - - -

    -
    - -
    -

    - -# 🧨 Diffusers - -🤗 Diffusers is the go-to library of state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. -Whether you are looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. -The library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstractions](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction). - - -The library has three main components: - -- State-of-the-art [diffusion pipelines](api/pipelines/overview) that can run inference with just a few lines of code. -- Interchangeable [noise schedulers](api/schedulers/overview) for balancing trade-offs between generation speed and quality. -- Pretrained [models](api/models) that can be used as building blocks, and combined with schedulers, to create your own end-to-end diffusion systems. -
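A minimal sketch of how the three components fit together (the checkpoint name is only an example, and the weights are downloaded on first use):

```python
# A pipeline bundles pretrained model(s) with a scheduler, runnable in a few lines.
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipeline("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")
```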
    - -
    - -## 🧨 Diffusers pipelines - -The table below summarizes all of the currently officially supported pipelines and their corresponding papers. - -| Pipeline | Paper/Repository | Tasks | -|---|---|:---:| -| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation | -| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation | -| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation | -| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation | -| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation | -| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation | -| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation | -| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation | -| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation | -| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation | -| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation | -| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image | -| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation | -| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting | -| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation | -| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation | -| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation | -| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation | -| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation | -| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation | -| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable 
Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting | -| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation | -| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing| -| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing | -| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation | -| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation Unconditional Image Generation | -| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation | -| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image | -| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing | -| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation | -| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting | -| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation | -| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image | -| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation | -| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation | -| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation | -| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation | -| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation | -| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125)(implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation | -| 
[versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation | -| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation | -| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation | -| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation | diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 980f8191d4c07eb35e338bd87e3b73b06b3214ad..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index c6dac64377bb3f73fdf5c836fa9c38757f75ff76..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/chase_db1.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/chase_db1.py deleted file mode 100644 index 8bc29bea14704a4407f83474610cbc3bef32c708..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/chase_db1.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ChaseDB1Dataset(CustomDataset): - """Chase_db1 dataset. - - In segmentation map annotation for Chase_db1, 0 stands for background, - which is included in 2 categories. ``reduce_zero_label`` is fixed to False. - The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '_1stHO.png'. 
- """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(ChaseDB1Dataset, self).__init__( - img_suffix='.png', - seg_map_suffix='_1stHO.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/ArturStepanenko/digitsSpace/app.py b/spaces/ArturStepanenko/digitsSpace/app.py deleted file mode 100644 index 655fae92fedce3146d56d581b71804641fb37d41..0000000000000000000000000000000000000000 --- a/spaces/ArturStepanenko/digitsSpace/app.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np -import gradio as gr -from tensorflow import keras - -model = keras.models.load_model('my_model') - -def greet(img): - img = np.expand_dims(img, axis=0) - return np.argmax(model.predict(img)[0]) - - -demo = gr.Interface(fn=greet, inputs="sketchpad", outputs="text") - -demo.launch() \ No newline at end of file diff --git a/spaces/BOXNYC/shirley/app.py b/spaces/BOXNYC/shirley/app.py deleted file mode 100644 index f4d34c63ee782feeac61b40b018749a12bb9b01f..0000000000000000000000000000000000000000 --- a/spaces/BOXNYC/shirley/app.py +++ /dev/null @@ -1,40 +0,0 @@ -from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper -from langchain import OpenAI -import gradio as gr -import sys -import os - -#os.environ["OPENAI_API_KEY"] - -def construct_index(directory_path): - max_input_size = 4096 - num_outputs = 512 - max_chunk_overlap = 20 - chunk_size_limit = 600 - - prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit) - - llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.7, model_name="gpt-4", max_tokens=num_outputs)) - - documents = SimpleDirectoryReader(directory_path).load_data() - - index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper) - - index.save_to_disk('index.json') - - return index - -def chatbot(input_text, api_token): - if api_token != os.environ["API_TOKEN"]: - return 'API_TOKEN does not match' - index = GPTSimpleVectorIndex.load_from_disk('index.json') - response = index.query(input_text, response_mode="tree_summarize") - return response.response - -iface = gr.Interface(fn=chatbot, - inputs=[gr.inputs.Textbox(lines=1, label="Ask Shirley"), gr.inputs.Textbox(lines=1, label="API_TOKEN")], - outputs="text", - title="Ask Shirley Anything") - -index = construct_index("docs") -iface.launch() \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Carreras De Caballos Virtuales 3d.md b/spaces/Benson/text-generation/Examples/Descargar Carreras De Caballos Virtuales 3d.md deleted file mode 100644 index c7d849e27542beff9bac8d1fb95cef5caf47d140..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Carreras De Caballos Virtuales 3d.md +++ /dev/null @@ -1,65 +0,0 @@ -
    -

    Download Virtual Horse Racing 3D: A Guide for Horse Racing Fans

    -

    If you love horse racing, you will love virtual horse racing 3D. Virtual horse racing 3D is a game that simulates the real world of horse racing in a digital environment. You can create profiles, place bets, choose horses and jockeys, watch the race in stunning 3D graphics and sound effects, and win prizes. Whether you are a beginner or an expert, you will find virtual horse racing 3D fun, challenging, and rewarding. In this article, we will show you how to download virtual horse racing 3D on different devices, how to play it, how to win at it, and what benefits it offers. So what are you waiting for? Read on and get ready to experience the thrill of virtual horse racing.

    -

    How to Download Virtual Horse Racing 3D on Different Devices

    -

    One of the best things about virtual horse racing 3D is that you can play it on a variety of devices. Whether you have an Android device, a Windows device, or something else, you can find a virtual horse racing 3D game that suits your preferences. Here are some of the options you can choose from:

    -

    download virtual horse racing 3d


    Download File ✶✶✶ https://bltlly.com/2v6JjU



    -

    Android devices: Google Play Store

    -

    If you have an Android device, you can download Horse Racing 3D from the Google Play Store. This is one of the most popular and authentic virtual horse racing games available. It has more than 10 million downloads and a rating of 4.4 out of 5 stars. You can race for dominance in different modes, such as career mode, betting mode, or tournament mode. You can also customize your horses with different colors and accessories. The game is free, but it offers in-app purchases for additional features.

    -

    Windows devices: Microsoft Store

    - -

    Otros dispositivos: ZED RUN

    Otros dispositivos: ZED RUN sitio web

    -

    If you have another device, such as a Mac or an iPad, you can play ZED RUN on its website. ZED RUN is a unique and innovative virtual horse racing game that lives on the blockchain and in an open marketplace. You can own, breed, and race your own digital horses, each with unique attributes and performance. You can also buy and sell your horses using cryptocurrency, such as Ethereum. ZED RUN is more than a game; it is a community of horse racing enthusiasts who share their passion and excitement. ZED RUN is free to join but requires a wallet and some funds to start playing.

    -

    How to Play Virtual Horse Racing 3D

    -

    Now that you know how to download virtual horse racing 3D on different devices, you may be wondering how to play it. The game is simple and intuitive, but also challenging and rewarding. Here are the basic steps you need to follow:

    -

    Create profiles and place bets

    -

    The first thing you need to do is create your profile and choose your preferred currency. You can use real money or virtual coins, depending on the game you are playing. Then you need to place your bets. You can bet on one or more horses in each race, depending on the type of bet you want to place. You can also adjust the size of your stake according to your budget and risk appetite.

    -

    Choose horses and jockeys

    -

    The next thing you need to do is choose your horses and jockeys. You can browse the list of horses and jockeys available in each race and select the ones that suit your preferences. You can also view their stats, such as speed, stamina, agility, and form. You can also customize your horses with different colors and accessories, if the game allows it.

    -

    Watch the race in 3D graphics and sound effects

    - -

    How to Win at Virtual Horse Racing 3D

    -

    Of course, playing virtual horse racing 3D is not just about watching the race, but also about winning it. Winning at virtual horse racing 3D takes some skill and knowledge of horse racing, as well as some luck and strategy. Here are some tips on how to win at virtual horse racing 3D:

    -

    Know the odds and bet types

    -

    One of the most important things you need to know is how to read the odds and the bet types. The odds are the numbers that indicate how likely a horse is to win or place in a race. The lower the odds, the higher the chance of winning, but also the lower the payout. The higher the odds, the lower the chance of winning, but also the higher the payout. Bet types are the ways you can bet on one or more horses in a race. The simplest type of bet is a win bet, where you bet on one horse to win the race. The other bet types are more complex and involve betting on multiple horses or outcomes, such as place, show, quinella, exacta, trifecta, or superfecta.
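    As a toy illustration of how odds map to payouts (this is not taken from any specific game; decimal odds are assumed):

```python
# With decimal odds, the total return of a simple win bet is stake * odds,
# so short-priced favourites pay little and longshots pay a lot (but rarely win).
def win_bet_return(stake: float, decimal_odds: float) -> float:
    return stake * decimal_odds  # amount returned if the horse wins; 0 otherwise

print(win_bet_return(10, 1.5))  # favourite: 15.0 back on a 10 stake
print(win_bet_return(10, 8.0))  # longshot: 80.0 back on a 10 stake
```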

    -

    Study the horses' attributes and performance

    -

    Another important thing you need to know is how to analyze the horses' attributes and performance. Attributes are the characteristics that affect a horse's performance in a race, such as speed, stamina, agility, and form. Performance is how a horse has done in previous races, such as wins, losses, places, shows, or scratches. You can use this information to compare and contrast different horses and jockeys and choose the ones that have an edge over the others.

    -

    -

    Use strategies and tips to improve your chances

    -

    The last important thing you need to know is how to use strategies and tips to improve your chances of winning. There are many strategies and tips you can use, depending on your level of experience and skill. Some of them are:

    - -

    Benefits of Playing Virtual Horse Racing 3D

    -

    Playing virtual horse racing 3D is not only fun and exciting, but also beneficial. Here are some of the benefits of playing virtual horse racing 3D:

    -

    Enjoy the thrill and excitement of horse racing anytime, anywhere

    -

    One of the main benefits of playing virtual horse racing 3D is that you can enjoy the thrill and excitement of horse racing anytime, anywhere. You do not need to go to a racetrack or a betting shop to experience the adrenaline of horse racing. You can play virtual horse racing 3D on your device at home, at work, or on the go. You can also choose from different modes, tracks, and events to suit your mood and preference.

    -

    Improve your horse racing skills and knowledge

    - -

    Compete with other players and win prizes

    -

    A final benefit of playing virtual horse racing 3D is that you can compete with other players and win prizes. You can join online tournaments and leagues and challenge other players from around the world. You can also chat with them and share your tips and opinions. Playing virtual horse racing 3D can help you make new friends and have fun. You can also win real or virtual prizes, such as cash, coins, vouchers, or trophies.

    -

    Conclusion: Why You Should Download Virtual Horse Racing 3D Today

    -

    In conclusion, virtual horse racing 3D is a game that simulates the real world of horse racing in a digital environment. You can create profiles, place bets, choose horses and jockeys, watch the race in stunning 3D graphics and sound effects, and win prizes. You can also enjoy the thrill and excitement of horse racing anytime, anywhere; improve your horse racing skills and knowledge; and compete with other players and win prizes. Virtual horse racing 3D is fun, challenging, rewarding, and beneficial. So what are you waiting for? Download virtual horse racing 3D today and experience the thrill of virtual horse racing.

    -

    FAQs: Frequently Asked Questions About Virtual Horse Racing 3D

    -

    Here are some of the most frequently asked questions about virtual horse racing 3D:

    -

    Q1: Is virtual horse racing 3D free to play?

    -

    A1: It depends on the game you are playing. Some games are free but offer in-app purchases for additional features. Some games cost a small fee but have no ads or in-app purchases. Some games are free to join but require a wallet and some funds to start playing.

    -

    Q2: Can I play virtual horse racing 3D offline?

    -

    A2: It depends on the game you are playing. Some games require an internet connection to play online with other players or to access live events. Some games let you play offline against computer-generated opponents or in solo modes.

    -

    Q3: Can I own and breed my own horses in virtual horse racing 3D?

    -

    A3: It depends on the game you are playing. Some games let you own and breed your own horses, each with unique attributes and performance. Some games also let you buy and sell your horses using cryptocurrency. Some games only let you choose from a list of available horses and jockeys.

    -

    Q4: How realistic is virtual horse racing 3D?

    -

    A4: It depends on the game you are playing. Some games are very realistic and immersive, with stunning 3D graphics and sound effects that make you feel like you are at the racetrack. Some games also use real data and statistics from actual horse racing events and horses. Some games are more casual and cartoonish, with simple graphics and sound effects that make you feel like you are playing a game.

    -

    Q5: What are the best virtual horse racing 3D games to download?

    -

    A5: It depends on your preferences and device. Some of the best virtual horse racing 3D games to download are:
    A5: Depende de sus preferencias y dispositivo. Algunos de los mejores juegos virtuales de carreras de caballos en 3D para descargar son:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Carreras De Coches Para Windows 7.md b/spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Carreras De Coches Para Windows 7.md deleted file mode 100644 index 0df6818266164803ff3e036b40795c00e427bfd3..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Gratis Juego De Carreras De Coches Para Windows 7.md +++ /dev/null @@ -1,136 +0,0 @@ - -

    Download Free Car Racing Games for Windows 7

    -

    If you love speed, adrenaline, and excitement, you probably enjoy playing car racing games. Car racing games are one of the most popular genres of video games, offering a variety of challenges, environments, vehicles, and modes to suit different tastes and preferences. Whether you prefer realistic simulations, arcade-style action, or off-road adventures, there is a car racing game for you.

    -

    But what if you have a Windows 7 PC? Can you still play car racing games on your aging operating system? The answer is yes! Windows 7 is still a supported platform for many car racing games, both old and new. In fact, some of the best car racing games ever made can run smoothly on Windows 7, as long as you meet the minimum system requirements.

    -

    download free car racing game for windows 7


    Download ✺✺✺ https://bltlly.com/2v6Lz3



    -

    In this article, we will show you how to find and download free car racing games for Windows 7, as well as some of the best paid ones. We will also give you some tips and advice on how to optimize your gaming experience on your Windows 7 PC. So buckle up, start your engines, and get ready to race!

    -

    The Best Car Racing Games for Windows 7

    -

    If you are willing to spend some money on quality car racing games, you have plenty of options to choose from. Here are some of the best car racing games you can play on your Windows 7 PC.

    -

    Forza Horizon 5

    - -

    Forza Horizon 5 is a visually stunning game that shows off the power of the Xbox Series X|S and Windows 10 platforms, but it can also run on Windows 7 with some tweaks. To download and play Forza Horizon 5 on Windows 7, you will need to follow these steps:

    -
      -
    1. Make sure your PC meets the minimum system requirements for Forza Horizon 5, which are:
      • Processor: Intel Core i3-4170 or AMD FX-8350
      • Memory: 8 GB RAM
      • Graphics: NVIDIA GeForce GTX 760 or AMD Radeon R7 260X
      • DirectX: Version 11
      • Storage: 80 GB of available space
    2. Download and install the Steam client on your Windows 7 PC.
    3. Create a Steam account or sign in to an existing one.
    4. Purchase Forza Horizon 5 from the Steam store for $59.99 USD or your regional equivalent.
    5. Download and install Forza Horizon 5 on your PC through the Steam client.
    6. Launch the game and enjoy!
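    If you want a rough, scriptable way to compare a PC against a requirements list like the one above, something like the following sketch works (illustrative only; `psutil` is a third-party package you would have to install):

```python
# Print basic hardware/OS facts to compare against a game's minimum specs.
import platform
import psutil  # pip install psutil

print("OS:       ", platform.platform())
print("CPU:      ", platform.processor())
print("RAM (GiB):", round(psutil.virtual_memory().total / 2**30, 1))
```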
    -

    Forza Horizon 5 is a game that offers endless fun and variety for car racing fans. You can choose from more than 500 licensed cars from different manufacturers, such as Ferrari, Lamborghini, Ford, Toyota, Honda, and more. You can also customize your cars with different paint jobs, decals, rims, spoilers, and performance upgrades. You can also tune your cars for different terrains and weather conditions, such as dirt, mud, snow, rain, and sand.

    -

    -

    The game also features a dynamic day-night cycle and a season system that changes the environment and the gameplay every week. For example, in winter you can slide along icy roads and frozen lakes, while in summer you can enjoy the sun and the vibrant colors of Mexican culture. The game also has a realistic physics engine that simulates the weight, speed, traction, and damage of the cars.

    - -

    If you want to play with other people, you can join online sessions that let you explore the open world with up to 72 players. You can also join or create clubs with other players who share your interests and preferences. You can challenge other players to races or games in the open world or in custom arenas, and you can cooperate with other players in team-based events and modes.

    -

    Dirt Rally 2.0

    Dirt Rally 2.0 is a game that aims to deliver a realistic and challenging car racing experience, focusing on the thrilling and demanding discipline of rally. Released in February 2019, Dirt Rally 2.0 is the sequel to the acclaimed Dirt Rally, which was praised for its authentic simulation of rally driving. Dirt Rally 2.0 builds on the strengths of its predecessor while adding more content, features, and improvements.

    -

    Dirt Rally 2.0 takes you to some of the most iconic rally locations in the world, such as New Zealand, Argentina, Spain, Poland, Australia, and the United States. You can choose from more than 50 rally cars from different eras and classes, such as the legendary Group B monsters, the modern WRC beasts, and the classic Mini Cooper S. You can also customize your cars with different liveries, setups, and upgrades.

    -

    The game also features a career mode, where you can create your own team and compete in various events and championships around the world. You can hire and manage your staff, such as engineers, mechanics, and co-drivers, and upgrade your facilities and equipment. You can also earn money and reputation by completing contracts and challenges.

    - -

    The game also has a sophisticated co-driver system that gives you accurate and timely pace notes to guide you through each stage. The co-driver will warn you of upcoming turns, hazards, jumps, and other features of the road. You will need to listen carefully and trust your co-driver to navigate the complex and varied stages.

    -

    To download and play Dirt Rally 2.0 on Windows 7, you will need to follow these steps:

    -
      -
    1. Make sure your PC meets the minimum system requirements for Dirt Rally 2.0, which are:
      • Processor: AMD FX4300 or Intel Core i3 2130
      • Memory: 8 GB RAM
      • Graphics: AMD HD7750 or NVIDIA GTX650Ti
      • DirectX: Version 11
      • Storage: 50 GB of available space
    2. Download and install the Steam client on your Windows 7 PC.
    3. Create a Steam account or sign in to an existing one.
    4. Purchase Dirt Rally 2.0 from the Steam store for $39.99 USD or your regional equivalent.
    5. Download and install Dirt Rally 2.0 on your PC through the Steam client.
    6. Launch the game and enjoy!
    -

    Dirt Rally 2.0 is a game that offers a rewarding and satisfying car racing experience for rally fans. You can enjoy the thrill of driving fast and hard on some of the most beautiful and challenging roads in the world. You can also test your skills against other players, online or offline, in various modes and events.

    Need for Speed Most Wanted

    -

    Need for Speed Most Wanted es un juego que combina la emoción de las carreras callejeras con la emoción de escapar de la ley. Lanzado en octubre Origin en su PC con Windows 7. -

  • Crear una cuenta de Origin o iniciar sesión en la existente.
  • -
  • Necesidad de compra de velocidad más buscados de la tienda Origin por $19.99 USD o su equivalente regional.
  • -
  • Descargar e instalar La necesidad de velocidad más buscada en su PC a través del cliente de Origin.
  • -
  • Iniciar el juego y disfrutar!
  • - - -

    Free Car Racing Games for Windows 7

    -

    Si estás buscando algunos juegos de carreras de coches gratis que puedes jugar en tu PC con Windows 7, entonces también tienes algunas opciones para elegir. Estos son algunos de los juegos de carreras de coches gratis que puedes descargar y jugar en tu PC con Windows 7.

    -

    Conductor de coche ruso: ZIL 130

    -

    Russian Car Driver: ZIL 130 es un juego que te permite conducir un camión soviético clásico a través de diferentes terrenos y escenarios. Lanzado en junio GameTop en su PC con Windows 7. -

  • Crear una cuenta de GameTop o iniciar sesión en la existente.
  • - -
  • Instalar y lanzar el juego a través del cliente GameTop.
  • -
  • Disfruta!
  • - -

    Russian Car Driver: ZIL 130 es un juego que ofrece mucha diversión y desafío para los aficionados a las carreras de coches que quieren probar algo diferente. Usted puede disfrutar de conducir un camión vintage en varios terrenos y escenarios, y personalizarlo a su gusto. También puedes competir en diferentes modos y eventos, o simplemente explorar el mundo abierto a tu propio ritmo.

    Crazy Cars

    Crazy Cars is a game that lets you relive the nostalgia of the classic arcade-style racing games of the 80s and 90s. Released in January 1987, it was one of the first games to feature realistic physics and graphics.

    Crazy Cars lets you choose between four different cars, each with its own speed, handling, and acceleration. You can also choose between three different tracks, each with its own scenery, traffic, and obstacles, and three difficulty levels, each with its own time limit and number of opponents.

    The controls are simple and intuitive, letting you steer, accelerate, brake, and turbo-boost your car. Retro-style graphics and sound effects recreate the feel of old-school racing games, and a high-score system lets you compete with other players around the world.

    To download and play Crazy Cars on Windows 7, follow these steps:

    1. Make sure your PC meets the minimum system requirements for Crazy Cars:
      • Processor: Pentium III or equivalent
      • Memory: 256 MB RAM
      • Graphics: DirectX-compatible video card
      • DirectX: Version 9.0c
      • Storage: 100 MB of available space
    2. Download Crazy Cars from the GameTop website for free.
    3. Follow the installation instructions and launch the game.
    4. Enjoy!

    Crazy Cars offers a fun and nostalgic racing experience for fans of classic arcade-style games. You can enjoy fast and furious driving on different tracks, dodging traffic and obstacles along the way, and you can challenge yourself to beat your own time and score, or compare your results with other players online.

    Conclusion

    In this article, we have shown you how to find and download free car racing games for Windows 7, as well as some of the best paid ones. We have also given you some tips and advice on how to optimize the gaming experience on your Windows 7 PC. We hope you have enjoyed reading this article and found it useful and informative.

    Car racing games are one of the most popular and entertaining genres in video games, offering a variety of challenges, environments, vehicles, and modes to suit different tastes and preferences. Whether you prefer realistic simulations, arcade-style action, or off-road adventures, there is a car racing game for you.

    If you have a Windows 7 PC, you can still play many car racing games, both old and new. In fact, some of the best car racing games ever made can run smoothly on Windows 7, as long as your PC meets the minimum system requirements. You can also download some car racing games for free from reliable websites such as GameTop.

    To enjoy car racing games on Windows 7, you will need to download and install a gaming client such as Steam or Origin, depending on the game you want to play, create an account, and buy the game from its store. You should also update your drivers and DirectX to ensure the best performance and graphics, and adjust the game's settings and options to suit your preferences and needs.

    Frequently Asked Questions

    What are the system requirements for playing car racing games on Windows 7?

    The system requirements for playing car racing games on Windows 7 vary depending on the game you want to play. As a general guideline, however, you will need at least a dual-core processor, 2 GB of RAM, a DirectX-compatible video card, and 20 GB of storage space. You will also need a stable Internet connection if you want to play online. A sketch for reading these details off your own machine follows this answer.
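    Windows can report most of these details itself through the built-in dxdiag tool. The sketch below is a minimal example: it shells out to dxdiag /t, which writes a plain-text system report and exits when done, then prints the report lines describing the processor, memory, DirectX version, and video card (the field names shown are those of dxdiag's English-language report, an assumption worth checking on localized systems).

    import subprocess
    import tempfile
    from pathlib import Path

    report = Path(tempfile.gettempdir()) / "dxdiag_report.txt"

    # dxdiag /t gathers system information, writes it to the given
    # text file, and exits once the report is complete.
    subprocess.run(["dxdiag", "/t", str(report)], check=True)

    for line in report.read_text(errors="ignore").splitlines():
        stripped = line.strip()
        # Field names as they appear in dxdiag's English text report.
        if stripped.startswith(("Processor:", "Memory:", "DirectX Version:", "Card name:")):
            print(stripped)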

    How can I improve the performance and graphics of car racing games on Windows 7?

    To improve the performance and graphics of car racing games on Windows 7, update your drivers and DirectX to the latest versions, and adjust each game's settings and options to match your PC's capabilities and your personal preferences. You can lower the resolution, texture quality, shadows, anti-aliasing, and other features to increase the frame rate and reduce lag, and you can toggle vertical sync, motion blur, depth of field, and other effects to balance visual quality and realism.

    Where can I find more free car racing games for Windows 7?

    You can find more free car racing games for Windows 7 on reliable websites such as GameTop, which offers a large collection of legal and safe car racing games that you can download and play without limitations or restrictions. You can also browse different categories, genres, ratings, and reviews to find the best car racing game for you.

    How can I play car racing games online with other players on Windows 7?

    How can I use a controller or a steering wheel to play car racing games on Windows 7?

    To use a controller or a steering wheel to play car racing games on Windows 7, you will need a compatible device that connects to your PC via USB or a wireless connection. Install the drivers and software for your device if necessary, then configure the game's settings and options to recognize and use the device as an input method. You can also customize your device's buttons, axes, sensitivity, and force feedback to suit your preferences and needs; a short detection sketch follows this answer.
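    As a quick way to confirm that Windows sees your device before you configure it in-game, the sketch below is a minimal example assuming the third-party pygame package is installed (pip install pygame); it lists every connected controller or wheel with its name, axis count, and button count.

    import pygame

    pygame.init()
    pygame.joystick.init()

    count = pygame.joystick.get_count()
    if count == 0:
        print("No controller or wheel detected.")

    for i in range(count):
        js = pygame.joystick.Joystick(i)
        js.init()
        # Name and capabilities as reported by the OS driver.
        print("Device %d: %s (%d axes, %d buttons)"
              % (i, js.get_name(), js.get_numaxes(), js.get_numbuttons()))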

    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filetypes.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filetypes.py deleted file mode 100644 index 5948570178f3e6e79d1ff574241d09d4d8ed78de..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filetypes.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Filetype information. -""" - -from typing import Tuple - -from pip._internal.utils.misc import splitext - -WHEEL_EXTENSION = ".whl" -BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz") -XZ_EXTENSIONS: Tuple[str, ...] = ( - ".tar.xz", - ".txz", - ".tlz", - ".tar.lz", - ".tar.lzma", -) -ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION) -TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar") -ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS - - -def is_archive_file(name: str) -> bool: - """Return True if `name` is a considered as an archive file.""" - ext = splitext(name)[1].lower() - if ext in ARCHIVE_EXTENSIONS: - return True - return False diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansi_test.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansi_test.py deleted file mode 100644 index 0a20c80f882066e0e1323b0c7f61e22913c32e35..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/tests/ansi_test.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -import sys -from unittest import TestCase, main - -from ..ansi import Back, Fore, Style -from ..ansitowin32 import AnsiToWin32 - -stdout_orig = sys.stdout -stderr_orig = sys.stderr - - -class AnsiTest(TestCase): - - def setUp(self): - # sanity check: stdout should be a file or StringIO object. - # It will only be AnsiToWin32 if init() has previously wrapped it - self.assertNotEqual(type(sys.stdout), AnsiToWin32) - self.assertNotEqual(type(sys.stderr), AnsiToWin32) - - def tearDown(self): - sys.stdout = stdout_orig - sys.stderr = stderr_orig - - - def testForeAttributes(self): - self.assertEqual(Fore.BLACK, '\033[30m') - self.assertEqual(Fore.RED, '\033[31m') - self.assertEqual(Fore.GREEN, '\033[32m') - self.assertEqual(Fore.YELLOW, '\033[33m') - self.assertEqual(Fore.BLUE, '\033[34m') - self.assertEqual(Fore.MAGENTA, '\033[35m') - self.assertEqual(Fore.CYAN, '\033[36m') - self.assertEqual(Fore.WHITE, '\033[37m') - self.assertEqual(Fore.RESET, '\033[39m') - - # Check the light, extended versions. - self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') - self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') - self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') - self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') - self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') - self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') - self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') - self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') - - - def testBackAttributes(self): - self.assertEqual(Back.BLACK, '\033[40m') - self.assertEqual(Back.RED, '\033[41m') - self.assertEqual(Back.GREEN, '\033[42m') - self.assertEqual(Back.YELLOW, '\033[43m') - self.assertEqual(Back.BLUE, '\033[44m') - self.assertEqual(Back.MAGENTA, '\033[45m') - self.assertEqual(Back.CYAN, '\033[46m') - self.assertEqual(Back.WHITE, '\033[47m') - self.assertEqual(Back.RESET, '\033[49m') - - # Check the light, extended versions. 
- self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') - self.assertEqual(Back.LIGHTRED_EX, '\033[101m') - self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') - self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') - self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') - self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') - self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') - self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') - - - def testStyleAttributes(self): - self.assertEqual(Style.DIM, '\033[2m') - self.assertEqual(Style.NORMAL, '\033[22m') - self.assertEqual(Style.BRIGHT, '\033[1m') - - -if __name__ == '__main__': - main() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/measure.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/measure.py deleted file mode 100644 index a508ffa80bd715b47c190ed9d747dbc388fa5b19..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/measure.py +++ /dev/null @@ -1,151 +0,0 @@ -from operator import itemgetter -from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence - -from . import errors -from .protocol import is_renderable, rich_cast - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType - - -class Measurement(NamedTuple): - """Stores the minimum and maximum widths (in characters) required to render an object.""" - - minimum: int - """Minimum number of cells required to render.""" - maximum: int - """Maximum number of cells required to render.""" - - @property - def span(self) -> int: - """Get difference between maximum and minimum.""" - return self.maximum - self.minimum - - def normalize(self) -> "Measurement": - """Get measurement that ensures that minimum <= maximum and minimum >= 0 - - Returns: - Measurement: A normalized measurement. - """ - minimum, maximum = self - minimum = min(max(0, minimum), maximum) - return Measurement(max(0, minimum), max(0, max(minimum, maximum))) - - def with_maximum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are <= width. - - Args: - width (int): Maximum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - return Measurement(min(minimum, width), min(maximum, width)) - - def with_minimum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are >= width. - - Args: - width (int): Minimum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - width = max(0, width) - return Measurement(max(minimum, width), max(maximum, width)) - - def clamp( - self, min_width: Optional[int] = None, max_width: Optional[int] = None - ) -> "Measurement": - """Clamp a measurement within the specified range. - - Args: - min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None. - max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None. - - Returns: - Measurement: New Measurement object. - """ - measurement = self - if min_width is not None: - measurement = measurement.with_minimum(min_width) - if max_width is not None: - measurement = measurement.with_maximum(max_width) - return measurement - - @classmethod - def get( - cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType" - ) -> "Measurement": - """Get a measurement for a renderable. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. 
- renderable (RenderableType): An object that may be rendered with Rich. - - Raises: - errors.NotRenderableError: If the object is not renderable. - - Returns: - Measurement: Measurement object containing range of character widths required to render the object. - """ - _max_width = options.max_width - if _max_width < 1: - return Measurement(0, 0) - if isinstance(renderable, str): - renderable = console.render_str( - renderable, markup=options.markup, highlight=False - ) - renderable = rich_cast(renderable) - if is_renderable(renderable): - get_console_width: Optional[ - Callable[["Console", "ConsoleOptions"], "Measurement"] - ] = getattr(renderable, "__rich_measure__", None) - if get_console_width is not None: - render_width = ( - get_console_width(console, options) - .normalize() - .with_maximum(_max_width) - ) - if render_width.maximum < 1: - return Measurement(0, 0) - return render_width.normalize() - else: - return Measurement(0, _max_width) - else: - raise errors.NotRenderableError( - f"Unable to get render width for {renderable!r}; " - "a str, Segment, or object with __rich_console__ method is required" - ) - - -def measure_renderables( - console: "Console", - options: "ConsoleOptions", - renderables: Sequence["RenderableType"], -) -> "Measurement": - """Get a measurement that would fit a number of renderables. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderables (Iterable[RenderableType]): One or more renderable objects. - - Returns: - Measurement: Measurement object containing range of character widths required to - contain all given renderables. - """ - if not renderables: - return Measurement(0, 0) - get_measurement = Measurement.get - measurements = [ - get_measurement(console, options, renderable) for renderable in renderables - ] - measured_width = Measurement( - max(measurements, key=itemgetter(0)).minimum, - max(measurements, key=itemgetter(1)).maximum, - ) - return measured_width diff --git a/spaces/CVPR/LIVE/pybind11/include/pybind11/pytypes.h b/spaces/CVPR/LIVE/pybind11/include/pybind11/pytypes.h deleted file mode 100644 index bea34cd9365c5191be29e986480c8434a8e0201e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/include/pybind11/pytypes.h +++ /dev/null @@ -1,1608 +0,0 @@ -/* - pybind11/pytypes.h: Convenience wrapper classes for basic Python types - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "detail/common.h" -#include "buffer_info.h" -#include -#include - -PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) - -/* A few forward declarations */ -class handle; class object; -class str; class iterator; -struct arg; struct arg_v; - -PYBIND11_NAMESPACE_BEGIN(detail) -class args_proxy; -inline bool isinstance_generic(handle obj, const std::type_info &tp); - -// Accessor forward declarations -template class accessor; -namespace accessor_policies { - struct obj_attr; - struct str_attr; - struct generic_item; - struct sequence_item; - struct list_item; - struct tuple_item; -} -using obj_attr_accessor = accessor; -using str_attr_accessor = accessor; -using item_accessor = accessor; -using sequence_accessor = accessor; -using list_accessor = accessor; -using tuple_accessor = accessor; - -/// Tag and check to identify a class which implements the Python object API -class pyobject_tag { }; -template using is_pyobject = std::is_base_of>; - -/** \rst - A mixin class which adds common functions to `handle`, `object` and various accessors. - The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``. -\endrst */ -template -class object_api : public pyobject_tag { - const Derived &derived() const { return static_cast(*this); } - -public: - /** \rst - Return an iterator equivalent to calling ``iter()`` in Python. The object - must be a collection which supports the iteration protocol. - \endrst */ - iterator begin() const; - /// Return a sentinel which ends iteration. - iterator end() const; - - /** \rst - Return an internal functor to invoke the object's sequence protocol. Casting - the returned ``detail::item_accessor`` instance to a `handle` or `object` - subclass causes a corresponding call to ``__getitem__``. Assigning a `handle` - or `object` subclass causes a call to ``__setitem__``. - \endrst */ - item_accessor operator[](handle key) const; - /// See above (the only difference is that they key is provided as a string literal) - item_accessor operator[](const char *key) const; - - /** \rst - Return an internal functor to access the object's attributes. Casting the - returned ``detail::obj_attr_accessor`` instance to a `handle` or `object` - subclass causes a corresponding call to ``getattr``. Assigning a `handle` - or `object` subclass causes a call to ``setattr``. - \endrst */ - obj_attr_accessor attr(handle key) const; - /// See above (the only difference is that they key is provided as a string literal) - str_attr_accessor attr(const char *key) const; - - /** \rst - Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple`` - or ``list`` for a function call. Applying another * to the result yields - ** unpacking, e.g. to unpack a dict as function keyword arguments. - See :ref:`calling_python_functions`. - \endrst */ - args_proxy operator*() const; - - /// Check if the given item is contained within this object, i.e. ``item in obj``. - template bool contains(T &&item) const; - - /** \rst - Assuming the Python object is a function or implements the ``__call__`` - protocol, ``operator()`` invokes the underlying function, passing an - arbitrary set of parameters. The result is returned as a `object` and - may need to be converted back into a Python object using `handle::cast()`. - - When some of the arguments cannot be converted to Python objects, the - function will throw a `cast_error` exception. When the Python function - call fails, a `error_already_set` exception is thrown. 
- \endrst */ - template - object operator()(Args &&...args) const; - template - PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)") - object call(Args&&... args) const; - - /// Equivalent to ``obj is other`` in Python. - bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); } - /// Equivalent to ``obj is None`` in Python. - bool is_none() const { return derived().ptr() == Py_None; } - /// Equivalent to obj == other in Python - bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); } - bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); } - bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); } - bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); } - bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); } - bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); } - - object operator-() const; - object operator~() const; - object operator+(object_api const &other) const; - object operator+=(object_api const &other) const; - object operator-(object_api const &other) const; - object operator-=(object_api const &other) const; - object operator*(object_api const &other) const; - object operator*=(object_api const &other) const; - object operator/(object_api const &other) const; - object operator/=(object_api const &other) const; - object operator|(object_api const &other) const; - object operator|=(object_api const &other) const; - object operator&(object_api const &other) const; - object operator&=(object_api const &other) const; - object operator^(object_api const &other) const; - object operator^=(object_api const &other) const; - object operator<<(object_api const &other) const; - object operator<<=(object_api const &other) const; - object operator>>(object_api const &other) const; - object operator>>=(object_api const &other) const; - - PYBIND11_DEPRECATED("Use py::str(obj) instead") - pybind11::str str() const; - - /// Get or set the object's docstring, i.e. ``obj.__doc__``. - str_attr_accessor doc() const; - - /// Return the object's current reference count - int ref_count() const { return static_cast(Py_REFCNT(derived().ptr())); } - /// Return a handle to the Python type object underlying the instance - handle get_type() const; - -private: - bool rich_compare(object_api const &other, int value) const; -}; - -PYBIND11_NAMESPACE_END(detail) - -/** \rst - Holds a reference to a Python object (no reference counting) - - The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a - ``PyObject *`` in Python's C API). It does not perform any automatic reference - counting and merely provides a basic C++ interface to various Python API functions. - - .. seealso:: - The `object` class inherits from `handle` and adds automatic reference - counting features. -\endrst */ -class handle : public detail::object_api { -public: - /// The default constructor creates a handle with a ``nullptr``-valued pointer - handle() = default; - /// Creates a ``handle`` from the given raw Python object pointer - handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject* - - /// Return the underlying ``PyObject *`` pointer - PyObject *ptr() const { return m_ptr; } - PyObject *&ptr() { return m_ptr; } - - /** \rst - Manually increase the reference count of the Python object. 
Usually, it is - preferable to use the `object` class which derives from `handle` and calls - this function automatically. Returns a reference to itself. - \endrst */ - const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; } - - /** \rst - Manually decrease the reference count of the Python object. Usually, it is - preferable to use the `object` class which derives from `handle` and calls - this function automatically. Returns a reference to itself. - \endrst */ - const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; } - - /** \rst - Attempt to cast the Python object into the given C++ type. A `cast_error` - will be throw upon failure. - \endrst */ - template T cast() const; - /// Return ``true`` when the `handle` wraps a valid Python object - explicit operator bool() const { return m_ptr != nullptr; } - /** \rst - Deprecated: Check that the underlying pointers are the same. - Equivalent to ``obj1 is obj2`` in Python. - \endrst */ - PYBIND11_DEPRECATED("Use obj1.is(obj2) instead") - bool operator==(const handle &h) const { return m_ptr == h.m_ptr; } - PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead") - bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; } - PYBIND11_DEPRECATED("Use handle::operator bool() instead") - bool check() const { return m_ptr != nullptr; } -protected: - PyObject *m_ptr = nullptr; -}; - -/** \rst - Holds a reference to a Python object (with reference counting) - - Like `handle`, the `object` class is a thin wrapper around an arbitrary Python - object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it - optionally increases the object's reference count upon construction, and it - *always* decreases the reference count when the `object` instance goes out of - scope and is destructed. When using `object` instances consistently, it is much - easier to get reference counting right at the first attempt. -\endrst */ -class object : public handle { -public: - object() = default; - PYBIND11_DEPRECATED("Use reinterpret_borrow() or reinterpret_steal()") - object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); } - /// Copy constructor; always increases the reference count - object(const object &o) : handle(o) { inc_ref(); } - /// Move constructor; steals the object from ``other`` and preserves its reference count - object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; } - /// Destructor; automatically calls `handle::dec_ref()` - ~object() { dec_ref(); } - - /** \rst - Resets the internal pointer to ``nullptr`` without decreasing the - object's reference count. The function returns a raw handle to the original - Python object. 
- \endrst */ - handle release() { - PyObject *tmp = m_ptr; - m_ptr = nullptr; - return handle(tmp); - } - - object& operator=(const object &other) { - other.inc_ref(); - dec_ref(); - m_ptr = other.m_ptr; - return *this; - } - - object& operator=(object &&other) noexcept { - if (this != &other) { - handle temp(m_ptr); - m_ptr = other.m_ptr; - other.m_ptr = nullptr; - temp.dec_ref(); - } - return *this; - } - - // Calling cast() on an object lvalue just copies (via handle::cast) - template T cast() const &; - // Calling on an object rvalue does a move, if needed and/or possible - template T cast() &&; - -protected: - // Tags for choosing constructors from raw PyObject * - struct borrowed_t { }; - struct stolen_t { }; - - template friend T reinterpret_borrow(handle); - template friend T reinterpret_steal(handle); - -public: - // Only accessible from derived classes and the reinterpret_* functions - object(handle h, borrowed_t) : handle(h) { inc_ref(); } - object(handle h, stolen_t) : handle(h) { } -}; - -/** \rst - Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference. - The target type ``T`` must be `object` or one of its derived classes. The function - doesn't do any conversions or checks. It's up to the user to make sure that the - target type is correct. - - .. code-block:: cpp - - PyObject *p = PyList_GetItem(obj, index); - py::object o = reinterpret_borrow(p); - // or - py::tuple t = reinterpret_borrow(p); // <-- `p` must be already be a `tuple` -\endrst */ -template T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; } - -/** \rst - Like `reinterpret_borrow`, but steals the reference. - - .. code-block:: cpp - - PyObject *p = PyObject_Str(obj); - py::str s = reinterpret_steal(p); // <-- `p` must be already be a `str` -\endrst */ -template T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; } - -PYBIND11_NAMESPACE_BEGIN(detail) -inline std::string error_string(); -PYBIND11_NAMESPACE_END(detail) - -/// Fetch and hold an error which was already set in Python. An instance of this is typically -/// thrown to propagate python-side errors back through C++ which can either be caught manually or -/// else falls back to the function dispatcher (which then raises the captured error back to -/// python). -class error_already_set : public std::runtime_error { -public: - /// Constructs a new exception from the current Python error indicator, if any. The current - /// Python error indicator will be cleared. - error_already_set() : std::runtime_error(detail::error_string()) { - PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr()); - } - - error_already_set(const error_already_set &) = default; - error_already_set(error_already_set &&) = default; - - inline ~error_already_set(); - - /// Give the currently-held error back to Python, if any. If there is currently a Python error - /// already set it is cleared first. After this call, the current object no longer stores the - /// error variables (but the `.what()` string is still available). - void restore() { PyErr_Restore(m_type.release().ptr(), m_value.release().ptr(), m_trace.release().ptr()); } - - /// If it is impossible to raise the currently-held error, such as in destructor, we can write - /// it out using Python's unraisable hook (sys.unraisablehook). The error context should be - /// some object whose repr() helps identify the location of the error. Python already knows the - /// type and value of the error, so there is no need to repeat that. 
For example, __func__ could - /// be helpful. After this call, the current object no longer stores the error variables, - /// and neither does Python. - void discard_as_unraisable(object err_context) { - restore(); - PyErr_WriteUnraisable(err_context.ptr()); - } - void discard_as_unraisable(const char *err_context) { - discard_as_unraisable(reinterpret_steal(PYBIND11_FROM_STRING(err_context))); - } - - // Does nothing; provided for backwards compatibility. - PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated") - void clear() {} - - /// Check if the currently trapped error type matches the given Python exception class (or a - /// subclass thereof). May also be passed a tuple to search for any exception class matches in - /// the given tuple. - bool matches(handle exc) const { return PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()); } - - const object& type() const { return m_type; } - const object& value() const { return m_value; } - const object& trace() const { return m_trace; } - -private: - object m_type, m_value, m_trace; -}; - -/** \defgroup python_builtins _ - Unless stated otherwise, the following C++ functions behave the same - as their Python counterparts. - */ - -/** \ingroup python_builtins - \rst - Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of - `object` or a class which was exposed to Python as ``py::class_``. -\endrst */ -template ::value, int> = 0> -bool isinstance(handle obj) { return T::check_(obj); } - -template ::value, int> = 0> -bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); } - -template <> inline bool isinstance(handle) = delete; -template <> inline bool isinstance(handle obj) { return obj.ptr() != nullptr; } - -/// \ingroup python_builtins -/// Return true if ``obj`` is an instance of the ``type``. 
-inline bool isinstance(handle obj, handle type) { - const auto result = PyObject_IsInstance(obj.ptr(), type.ptr()); - if (result == -1) - throw error_already_set(); - return result != 0; -} - -/// \addtogroup python_builtins -/// @{ -inline bool hasattr(handle obj, handle name) { - return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1; -} - -inline bool hasattr(handle obj, const char *name) { - return PyObject_HasAttrString(obj.ptr(), name) == 1; -} - -inline void delattr(handle obj, handle name) { - if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) { throw error_already_set(); } -} - -inline void delattr(handle obj, const char *name) { - if (PyObject_DelAttrString(obj.ptr(), name) != 0) { throw error_already_set(); } -} - -inline object getattr(handle obj, handle name) { - PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} - -inline object getattr(handle obj, const char *name) { - PyObject *result = PyObject_GetAttrString(obj.ptr(), name); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} - -inline object getattr(handle obj, handle name, handle default_) { - if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) { - return reinterpret_steal(result); - } else { - PyErr_Clear(); - return reinterpret_borrow(default_); - } -} - -inline object getattr(handle obj, const char *name, handle default_) { - if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) { - return reinterpret_steal(result); - } else { - PyErr_Clear(); - return reinterpret_borrow(default_); - } -} - -inline void setattr(handle obj, handle name, handle value) { - if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); } -} - -inline void setattr(handle obj, const char *name, handle value) { - if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); } -} - -inline ssize_t hash(handle obj) { - auto h = PyObject_Hash(obj.ptr()); - if (h == -1) { throw error_already_set(); } - return h; -} - -/// @} python_builtins - -PYBIND11_NAMESPACE_BEGIN(detail) -inline handle get_function(handle value) { - if (value) { -#if PY_MAJOR_VERSION >= 3 - if (PyInstanceMethod_Check(value.ptr())) - value = PyInstanceMethod_GET_FUNCTION(value.ptr()); - else -#endif - if (PyMethod_Check(value.ptr())) - value = PyMethod_GET_FUNCTION(value.ptr()); - } - return value; -} - -// Helper aliases/functions to support implicit casting of values given to python accessors/methods. -// When given a pyobject, this simply returns the pyobject as-is; for other C++ type, the value goes -// through pybind11::cast(obj) to convert it to an `object`. -template ::value, int> = 0> -auto object_or_cast(T &&o) -> decltype(std::forward(o)) { return std::forward(o); } -// The following casting version is implemented in cast.h: -template ::value, int> = 0> -object object_or_cast(T &&o); -// Match a PyObject*, which we want to convert directly to handle via its converting constructor -inline handle object_or_cast(PyObject *ptr) { return ptr; } - -template -class accessor : public object_api> { - using key_type = typename Policy::key_type; - -public: - accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { } - accessor(const accessor &) = default; - accessor(accessor &&) = default; - - // accessor overload required to override default assignment operator (templates are not allowed - // to replace default compiler-generated assignments). 
- void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); } - void operator=(const accessor &a) & { operator=(handle(a)); } - - template void operator=(T &&value) && { - Policy::set(obj, key, object_or_cast(std::forward(value))); - } - template void operator=(T &&value) & { - get_cache() = reinterpret_borrow(object_or_cast(std::forward(value))); - } - - template - PYBIND11_DEPRECATED("Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)") - explicit operator enable_if_t::value || - std::is_same::value, bool>() const { - return hasattr(obj, key); - } - template - PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)") - explicit operator enable_if_t::value, bool>() const { - return obj.contains(key); - } - - operator object() const { return get_cache(); } - PyObject *ptr() const { return get_cache().ptr(); } - template T cast() const { return get_cache().template cast(); } - -private: - object &get_cache() const { - if (!cache) { cache = Policy::get(obj, key); } - return cache; - } - -private: - handle obj; - key_type key; - mutable object cache; -}; - -PYBIND11_NAMESPACE_BEGIN(accessor_policies) -struct obj_attr { - using key_type = object; - static object get(handle obj, handle key) { return getattr(obj, key); } - static void set(handle obj, handle key, handle val) { setattr(obj, key, val); } -}; - -struct str_attr { - using key_type = const char *; - static object get(handle obj, const char *key) { return getattr(obj, key); } - static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); } -}; - -struct generic_item { - using key_type = object; - - static object get(handle obj, handle key) { - PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); - } - - static void set(handle obj, handle key, handle val) { - if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); } - } -}; - -struct sequence_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PySequence_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); - } - - static void set(handle obj, size_t index, handle val) { - // PySequence_SetItem does not steal a reference to 'val' - if (PySequence_SetItem(obj.ptr(), static_cast(index), val.ptr()) != 0) { - throw error_already_set(); - } - } -}; - -struct list_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PyList_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - return reinterpret_borrow(result); - } - - static void set(handle obj, size_t index, handle val) { - // PyList_SetItem steals a reference to 'val' - if (PyList_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { - throw error_already_set(); - } - } -}; - -struct tuple_item { - using key_type = size_t; - - static object get(handle obj, size_t index) { - PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast(index)); - if (!result) { throw error_already_set(); } - return reinterpret_borrow(result); - } - - static void set(handle obj, size_t index, handle val) { - // PyTuple_SetItem steals a reference to 'val' - if (PyTuple_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { - throw error_already_set(); - } - } -}; -PYBIND11_NAMESPACE_END(accessor_policies) - -/// 
STL iterator template used for tuple, list, sequence and dict -template -class generic_iterator : public Policy { - using It = generic_iterator; - -public: - using difference_type = ssize_t; - using iterator_category = typename Policy::iterator_category; - using value_type = typename Policy::value_type; - using reference = typename Policy::reference; - using pointer = typename Policy::pointer; - - generic_iterator() = default; - generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { } - - reference operator*() const { return Policy::dereference(); } - reference operator[](difference_type n) const { return *(*this + n); } - pointer operator->() const { return **this; } - - It &operator++() { Policy::increment(); return *this; } - It operator++(int) { auto copy = *this; Policy::increment(); return copy; } - It &operator--() { Policy::decrement(); return *this; } - It operator--(int) { auto copy = *this; Policy::decrement(); return copy; } - It &operator+=(difference_type n) { Policy::advance(n); return *this; } - It &operator-=(difference_type n) { Policy::advance(-n); return *this; } - - friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; } - friend It operator+(difference_type n, const It &b) { return b + n; } - friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; } - friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); } - - friend bool operator==(const It &a, const It &b) { return a.equal(b); } - friend bool operator!=(const It &a, const It &b) { return !(a == b); } - friend bool operator< (const It &a, const It &b) { return b - a > 0; } - friend bool operator> (const It &a, const It &b) { return b < a; } - friend bool operator>=(const It &a, const It &b) { return !(a < b); } - friend bool operator<=(const It &a, const It &b) { return !(a > b); } -}; - -PYBIND11_NAMESPACE_BEGIN(iterator_policies) -/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers -template -struct arrow_proxy { - T value; - - arrow_proxy(T &&value) : value(std::move(value)) { } - T *operator->() const { return &value; } -}; - -/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS`` -class sequence_fast_readonly { -protected: - using iterator_category = std::random_access_iterator_tag; - using value_type = handle; - using reference = const handle; - using pointer = arrow_proxy; - - sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { } - - reference dereference() const { return *ptr; } - void increment() { ++ptr; } - void decrement() { --ptr; } - void advance(ssize_t n) { ptr += n; } - bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; } - ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; } - -private: - PyObject **ptr; -}; - -/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor`` -class sequence_slow_readwrite { -protected: - using iterator_category = std::random_access_iterator_tag; - using value_type = object; - using reference = sequence_accessor; - using pointer = arrow_proxy; - - sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { } - - reference dereference() const { return {obj, static_cast(index)}; } - void increment() { ++index; } - void decrement() { --index; } - void advance(ssize_t n) { index += n; } - bool equal(const sequence_slow_readwrite &b) const { return 
index == b.index; } - ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; } - -private: - handle obj; - ssize_t index; -}; - -/// Python's dictionary protocol permits this to be a forward iterator -class dict_readonly { -protected: - using iterator_category = std::forward_iterator_tag; - using value_type = std::pair; - using reference = const value_type; - using pointer = arrow_proxy; - - dict_readonly() = default; - dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); } - - reference dereference() const { return {key, value}; } - void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } } - bool equal(const dict_readonly &b) const { return pos == b.pos; } - -private: - handle obj; - PyObject *key = nullptr, *value = nullptr; - ssize_t pos = -1; -}; -PYBIND11_NAMESPACE_END(iterator_policies) - -#if !defined(PYPY_VERSION) -using tuple_iterator = generic_iterator; -using list_iterator = generic_iterator; -#else -using tuple_iterator = generic_iterator; -using list_iterator = generic_iterator; -#endif - -using sequence_iterator = generic_iterator; -using dict_iterator = generic_iterator; - -inline bool PyIterable_Check(PyObject *obj) { - PyObject *iter = PyObject_GetIter(obj); - if (iter) { - Py_DECREF(iter); - return true; - } else { - PyErr_Clear(); - return false; - } -} - -inline bool PyNone_Check(PyObject *o) { return o == Py_None; } -inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; } - -inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); } - -inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; } - -class kwargs_proxy : public handle { -public: - explicit kwargs_proxy(handle h) : handle(h) { } -}; - -class args_proxy : public handle { -public: - explicit args_proxy(handle h) : handle(h) { } - kwargs_proxy operator*() const { return kwargs_proxy(*this); } -}; - -/// Python argument categories (using PEP 448 terms) -template using is_keyword = std::is_base_of; -template using is_s_unpacking = std::is_same; // * unpacking -template using is_ds_unpacking = std::is_same; // ** unpacking -template using is_positional = satisfies_none_of; -template using is_keyword_or_ds = satisfies_any_of; - -// Call argument collector forward declarations -template -class simple_collector; -template -class unpacking_collector; - -PYBIND11_NAMESPACE_END(detail) - -// TODO: After the deprecated constructors are removed, this macro can be simplified by -// inheriting ctors: `using Parent::Parent`. It's not an option right now because -// the `using` statement triggers the parent deprecation warning even if the ctor -// isn't even used. -#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - public: \ - PYBIND11_DEPRECATED("Use reinterpret_borrow<"#Name">() or reinterpret_steal<"#Name">()") \ - Name(handle h, bool is_borrowed) : Parent(is_borrowed ? 
Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \ - Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \ - Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \ - PYBIND11_DEPRECATED("Use py::isinstance(obj) instead") \ - bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \ - static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \ - template \ - Name(const ::pybind11::detail::accessor &a) : Name(object(a)) { } - -#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \ - PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ - Name(const object &o) \ - : Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) \ - { if (!m_ptr) throw error_already_set(); } \ - Name(object &&o) \ - : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \ - { if (!m_ptr) throw error_already_set(); } - -#define PYBIND11_OBJECT(Name, Parent, CheckFun) \ - PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ - /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ - Name(const object &o) : Parent(o) { } \ - Name(object &&o) : Parent(std::move(o)) { } - -#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \ - PYBIND11_OBJECT(Name, Parent, CheckFun) \ - Name() : Parent() { } - -/// \addtogroup pytypes -/// @{ - -/** \rst - Wraps a Python iterator so that it can also be used as a C++ input iterator - - Caveat: copying an iterator does not (and cannot) clone the internal - state of the Python iterable. This also applies to the post-increment - operator. This iterator should only be used to retrieve the current - value using ``operator*()``. -\endrst */ -class iterator : public object { -public: - using iterator_category = std::input_iterator_tag; - using difference_type = ssize_t; - using value_type = handle; - using reference = const handle; - using pointer = const handle *; - - PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check) - - iterator& operator++() { - advance(); - return *this; - } - - iterator operator++(int) { - auto rv = *this; - advance(); - return rv; - } - - reference operator*() const { - if (m_ptr && !value.ptr()) { - auto& self = const_cast(*this); - self.advance(); - } - return value; - } - - pointer operator->() const { operator*(); return &value; } - - /** \rst - The value which marks the end of the iteration. ``it == iterator::sentinel()`` - is equivalent to catching ``StopIteration`` in Python. - - .. 
code-block:: cpp - - void foo(py::iterator it) { - while (it != py::iterator::sentinel()) { - // use `*it` - ++it; - } - } - \endrst */ - static iterator sentinel() { return {}; } - - friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); } - friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); } - -private: - void advance() { - value = reinterpret_steal(PyIter_Next(m_ptr)); - if (PyErr_Occurred()) { throw error_already_set(); } - } - -private: - object value = {}; -}; - -class iterable : public object { -public: - PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check) -}; - -class bytes; - -class str : public object { -public: - PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str) - - str(const char *c, size_t n) - : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate string object!"); - } - - // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects - str(const char *c = "") - : object(PyUnicode_FromString(c), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate string object!"); - } - - str(const std::string &s) : str(s.data(), s.size()) { } - - explicit str(const bytes &b); - - /** \rst - Return a string representation of the object. This is analogous to - the ``str()`` function in Python. - \endrst */ - explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { } - - operator std::string() const { - object temp = *this; - if (PyUnicode_Check(m_ptr)) { - temp = reinterpret_steal(PyUnicode_AsUTF8String(m_ptr)); - if (!temp) - pybind11_fail("Unable to extract string contents! (encoding issue)"); - } - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract string contents! 
(invalid type)"); - return std::string(buffer, (size_t) length); - } - - template - str format(Args &&...args) const { - return attr("format")(std::forward(args)...); - } - -private: - /// Return string representation -- always returns a new reference, even if already a str - static PyObject *raw_str(PyObject *op) { - PyObject *str_value = PyObject_Str(op); - if (!str_value) throw error_already_set(); -#if PY_MAJOR_VERSION < 3 - PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); - Py_XDECREF(str_value); str_value = unicode; -#endif - return str_value; - } -}; -/// @} pytypes - -inline namespace literals { -/** \rst - String literal version of `str` - \endrst */ -inline str operator"" _s(const char *s, size_t size) { return {s, size}; } -} - -/// \addtogroup pytypes -/// @{ -class bytes : public object { -public: - PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK) - - // Allow implicit conversion: - bytes(const char *c = "") - : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate bytes object!"); - } - - bytes(const char *c, size_t n) - : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate bytes object!"); - } - - // Allow implicit conversion: - bytes(const std::string &s) : bytes(s.data(), s.size()) { } - - explicit bytes(const pybind11::str &s); - - operator std::string() const { - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length)) - pybind11_fail("Unable to extract bytes contents!"); - return std::string(buffer, (size_t) length); - } -}; -// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors -// are included in the doxygen group; close here and reopen after as a workaround -/// @} pytypes - -inline bytes::bytes(const pybind11::str &s) { - object temp = s; - if (PyUnicode_Check(s.ptr())) { - temp = reinterpret_steal(PyUnicode_AsUTF8String(s.ptr())); - if (!temp) - pybind11_fail("Unable to extract string contents! (encoding issue)"); - } - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract string contents! (invalid type)"); - auto obj = reinterpret_steal(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length)); - if (!obj) - pybind11_fail("Could not allocate bytes object!"); - m_ptr = obj.release().ptr(); -} - -inline str::str(const bytes& b) { - char *buffer; - ssize_t length; - if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length)) - pybind11_fail("Unable to extract bytes contents!"); - auto obj = reinterpret_steal(PyUnicode_FromStringAndSize(buffer, (ssize_t) length)); - if (!obj) - pybind11_fail("Could not allocate string object!"); - m_ptr = obj.release().ptr(); -} - -/// \addtogroup pytypes -/// @{ -class none : public object { -public: - PYBIND11_OBJECT(none, object, detail::PyNone_Check) - none() : object(Py_None, borrowed_t{}) { } -}; - -class ellipsis : public object { -public: - PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check) - ellipsis() : object(Py_Ellipsis, borrowed_t{}) { } -}; - -class bool_ : public object { -public: - PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool) - bool_() : object(Py_False, borrowed_t{}) { } - // Allow implicit conversion from and to `bool`: - bool_(bool value) : object(value ? 
Py_True : Py_False, borrowed_t{}) { } - operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; } - -private: - /// Return the truth value of an object -- always returns a new reference - static PyObject *raw_bool(PyObject *op) { - const auto value = PyObject_IsTrue(op); - if (value == -1) return nullptr; - return handle(value ? Py_True : Py_False).inc_ref().ptr(); - } -}; - -PYBIND11_NAMESPACE_BEGIN(detail) -// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1; -// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned). -// (The distinction is critically important when casting a returned -1 error value to some other -// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes). -template -Unsigned as_unsigned(PyObject *o) { - if (sizeof(Unsigned) <= sizeof(unsigned long) -#if PY_VERSION_HEX < 0x03000000 - || PyInt_Check(o) -#endif - ) { - unsigned long v = PyLong_AsUnsignedLong(o); - return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; - } - else { - unsigned long long v = PyLong_AsUnsignedLongLong(o); - return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; - } -} -PYBIND11_NAMESPACE_END(detail) - -class int_ : public object { -public: - PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long) - int_() : object(PyLong_FromLong(0), stolen_t{}) { } - // Allow implicit conversion from C++ integral types: - template ::value, int> = 0> - int_(T value) { - if (sizeof(T) <= sizeof(long)) { - if (std::is_signed::value) - m_ptr = PyLong_FromLong((long) value); - else - m_ptr = PyLong_FromUnsignedLong((unsigned long) value); - } else { - if (std::is_signed::value) - m_ptr = PyLong_FromLongLong((long long) value); - else - m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value); - } - if (!m_ptr) pybind11_fail("Could not allocate int object!"); - } - - template ::value, int> = 0> - operator T() const { - return std::is_unsigned::value - ? detail::as_unsigned(m_ptr) - : sizeof(T) <= sizeof(long) - ? 
(T) PyLong_AsLong(m_ptr) - : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr); - } -}; - -class float_ : public object { -public: - PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float) - // Allow implicit conversion from float/double: - float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate float object!"); - } - float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate float object!"); - } - operator float() const { return (float) PyFloat_AsDouble(m_ptr); } - operator double() const { return (double) PyFloat_AsDouble(m_ptr); } -}; - -class weakref : public object { -public: - PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check) - explicit weakref(handle obj, handle callback = {}) - : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate weak reference!"); - } -}; - -class slice : public object { -public: - PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check) - slice(ssize_t start_, ssize_t stop_, ssize_t step_) { - int_ start(start_), stop(stop_), step(step_); - m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr()); - if (!m_ptr) pybind11_fail("Could not allocate slice object!"); - } - bool compute(size_t length, size_t *start, size_t *stop, size_t *step, - size_t *slicelength) const { - return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr, - (ssize_t) length, (ssize_t *) start, - (ssize_t *) stop, (ssize_t *) step, - (ssize_t *) slicelength) == 0; - } - bool compute(ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step, - ssize_t *slicelength) const { - return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr, - length, start, - stop, step, - slicelength) == 0; - } -}; - -class capsule : public object { -public: - PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact) - PYBIND11_DEPRECATED("Use reinterpret_borrow() or reinterpret_steal()") - capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? 
object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { } - - explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr) - : object(PyCapsule_New(const_cast(value), name, destructor), stolen_t{}) { - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input") - capsule(const void *value, void (*destruct)(PyObject *)) - : object(PyCapsule_New(const_cast(value), nullptr, destruct), stolen_t{}) { - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - capsule(const void *value, void (*destructor)(void *)) { - m_ptr = PyCapsule_New(const_cast(value), nullptr, [](PyObject *o) { - auto destructor = reinterpret_cast(PyCapsule_GetContext(o)); - void *ptr = PyCapsule_GetPointer(o, nullptr); - destructor(ptr); - }); - - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - - if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0) - pybind11_fail("Could not set capsule context!"); - } - - capsule(void (*destructor)()) { - m_ptr = PyCapsule_New(reinterpret_cast(destructor), nullptr, [](PyObject *o) { - auto destructor = reinterpret_cast(PyCapsule_GetPointer(o, nullptr)); - destructor(); - }); - - if (!m_ptr) - pybind11_fail("Could not allocate capsule object!"); - } - - template operator T *() const { - auto name = this->name(); - T * result = static_cast(PyCapsule_GetPointer(m_ptr, name)); - if (!result) pybind11_fail("Unable to extract capsule contents!"); - return result; - } - - const char *name() const { return PyCapsule_GetName(m_ptr); } -}; - -class tuple : public object { -public: - PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple) - explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate tuple object!"); - } - size_t size() const { return (size_t) PyTuple_Size(m_ptr); } - bool empty() const { return size() == 0; } - detail::tuple_accessor operator[](size_t index) const { return {*this, index}; } - detail::item_accessor operator[](handle h) const { return object::operator[](h); } - detail::tuple_iterator begin() const { return {*this, 0}; } - detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; } -}; - -class dict : public object { -public: - PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict) - dict() : object(PyDict_New(), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate dict object!"); - } - template ...>::value>, - // MSVC workaround: it can't compile an out-of-line definition, so defer the collector - typename collector = detail::deferred_t, Args...>> - explicit dict(Args &&...args) : dict(collector(std::forward(args)...).kwargs()) { } - - size_t size() const { return (size_t) PyDict_Size(m_ptr); } - bool empty() const { return size() == 0; } - detail::dict_iterator begin() const { return {*this, 0}; } - detail::dict_iterator end() const { return {}; } - void clear() const { PyDict_Clear(ptr()); } - template bool contains(T &&key) const { - return PyDict_Contains(m_ptr, detail::object_or_cast(std::forward(key)).ptr()) == 1; - } - -private: - /// Call the `dict` Python type -- always returns a new reference - static PyObject *raw_dict(PyObject *op) { - if (PyDict_Check(op)) - return handle(op).inc_ref().ptr(); - return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr); - } -}; - -class sequence : public object { -public: - PYBIND11_OBJECT_DEFAULT(sequence, 
object, PySequence_Check) - size_t size() const { - ssize_t result = PySequence_Size(m_ptr); - if (result == -1) - throw error_already_set(); - return (size_t) result; - } - bool empty() const { return size() == 0; } - detail::sequence_accessor operator[](size_t index) const { return {*this, index}; } - detail::item_accessor operator[](handle h) const { return object::operator[](h); } - detail::sequence_iterator begin() const { return {*this, 0}; } - detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; } -}; - -class list : public object { -public: - PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List) - explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate list object!"); - } - size_t size() const { return (size_t) PyList_Size(m_ptr); } - bool empty() const { return size() == 0; } - detail::list_accessor operator[](size_t index) const { return {*this, index}; } - detail::item_accessor operator[](handle h) const { return object::operator[](h); } - detail::list_iterator begin() const { return {*this, 0}; } - detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; } - template void append(T &&val) const { - PyList_Append(m_ptr, detail::object_or_cast(std::forward(val)).ptr()); - } - template void insert(size_t index, T &&val) const { - PyList_Insert(m_ptr, static_cast(index), - detail::object_or_cast(std::forward(val)).ptr()); - } -}; - -class args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) }; -class kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check) }; - -class set : public object { -public: - PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New) - set() : object(PySet_New(nullptr), stolen_t{}) { - if (!m_ptr) pybind11_fail("Could not allocate set object!"); - } - size_t size() const { return (size_t) PySet_Size(m_ptr); } - bool empty() const { return size() == 0; } - template bool add(T &&val) const { - return PySet_Add(m_ptr, detail::object_or_cast(std::forward(val)).ptr()) == 0; - } - void clear() const { PySet_Clear(m_ptr); } - template bool contains(T &&val) const { - return PySet_Contains(m_ptr, detail::object_or_cast(std::forward(val)).ptr()) == 1; - } -}; - -class function : public object { -public: - PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check) - handle cpp_function() const { - handle fun = detail::get_function(m_ptr); - if (fun && PyCFunction_Check(fun.ptr())) - return fun; - return handle(); - } - bool is_cpp_function() const { return (bool) cpp_function(); } -}; - -class staticmethod : public object { -public: - PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New) -}; - -class buffer : public object { -public: - PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer) - - buffer_info request(bool writable = false) const { - int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if (writable) flags |= PyBUF_WRITABLE; - Py_buffer *view = new Py_buffer(); - if (PyObject_GetBuffer(m_ptr, view, flags) != 0) { - delete view; - throw error_already_set(); - } - return buffer_info(view); - } -}; - -class memoryview : public object { -public: - PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject) - - /** \rst - Creates ``memoryview`` from ``buffer_info``. - - ``buffer_info`` must be created from ``buffer::request()``. Otherwise - throws an exception. 
- - For creating a ``memoryview`` from objects that support buffer protocol, - use ``memoryview(const object& obj)`` instead of this constructor. - \endrst */ - explicit memoryview(const buffer_info& info) { - if (!info.view()) - pybind11_fail("Prohibited to create memoryview without Py_buffer"); - // Note: PyMemoryView_FromBuffer never increments obj reference. - m_ptr = (info.view()->obj) ? - PyMemoryView_FromObject(info.view()->obj) : - PyMemoryView_FromBuffer(info.view()); - if (!m_ptr) - pybind11_fail("Unable to create memoryview from buffer descriptor"); - } - - /** \rst - Creates ``memoryview`` from static buffer. - - This method is meant for providing a ``memoryview`` for C/C++ buffer not - managed by Python. The caller is responsible for managing the lifetime - of ``ptr`` and ``format``, which MUST outlive the memoryview constructed - here. - - See also: Python C API documentation for `PyMemoryView_FromBuffer`_. - - .. _PyMemoryView_FromBuffer: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer - - :param ptr: Pointer to the buffer. - :param itemsize: Byte size of an element. - :param format: Pointer to the null-terminated format string. For - homogeneous Buffers, this should be set to - ``format_descriptor::value``. - :param shape: Shape of the tensor (1 entry per dimension). - :param strides: Number of bytes between adjacent entries (for each - per dimension). - :param readonly: Flag to indicate if the underlying storage may be - written to. - \endrst */ - static memoryview from_buffer( - void *ptr, ssize_t itemsize, const char *format, - detail::any_container shape, - detail::any_container strides, bool readonly = false); - - static memoryview from_buffer( - const void *ptr, ssize_t itemsize, const char *format, - detail::any_container shape, - detail::any_container strides) { - return memoryview::from_buffer( - const_cast(ptr), itemsize, format, shape, strides, true); - } - - template - static memoryview from_buffer( - T *ptr, detail::any_container shape, - detail::any_container strides, bool readonly = false) { - return memoryview::from_buffer( - reinterpret_cast(ptr), sizeof(T), - format_descriptor::value, shape, strides, readonly); - } - - template - static memoryview from_buffer( - const T *ptr, detail::any_container shape, - detail::any_container strides) { - return memoryview::from_buffer( - const_cast(ptr), shape, strides, true); - } - -#if PY_MAJOR_VERSION >= 3 - /** \rst - Creates ``memoryview`` from static memory. - - This method is meant for providing a ``memoryview`` for C/C++ buffer not - managed by Python. The caller is responsible for managing the lifetime - of ``mem``, which MUST outlive the memoryview constructed here. - - This method is not available in Python 2. - - See also: Python C API documentation for `PyMemoryView_FromBuffer`_. - - .. _PyMemoryView_FromMemory: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory - \endrst */ - static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) { - PyObject* ptr = PyMemoryView_FromMemory( - reinterpret_cast(mem), size, - (readonly) ? 
PyBUF_READ : PyBUF_WRITE); - if (!ptr) - pybind11_fail("Could not allocate memoryview object!"); - return memoryview(object(ptr, stolen_t{})); - } - - static memoryview from_memory(const void *mem, ssize_t size) { - return memoryview::from_memory(const_cast(mem), size, true); - } -#endif -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS -inline memoryview memoryview::from_buffer( - void *ptr, ssize_t itemsize, const char* format, - detail::any_container shape, - detail::any_container strides, bool readonly) { - size_t ndim = shape->size(); - if (ndim != strides->size()) - pybind11_fail("memoryview: shape length doesn't match strides length"); - ssize_t size = ndim ? 1 : 0; - for (size_t i = 0; i < ndim; ++i) - size *= (*shape)[i]; - Py_buffer view; - view.buf = ptr; - view.obj = nullptr; - view.len = size * itemsize; - view.readonly = static_cast(readonly); - view.itemsize = itemsize; - view.format = const_cast(format); - view.ndim = static_cast(ndim); - view.shape = shape->data(); - view.strides = strides->data(); - view.suboffsets = nullptr; - view.internal = nullptr; - PyObject* obj = PyMemoryView_FromBuffer(&view); - if (!obj) - throw error_already_set(); - return memoryview(object(obj, stolen_t{})); -} -#endif // DOXYGEN_SHOULD_SKIP_THIS -/// @} pytypes - -/// \addtogroup python_builtins -/// @{ -inline size_t len(handle h) { - ssize_t result = PyObject_Length(h.ptr()); - if (result < 0) - pybind11_fail("Unable to compute length of object"); - return (size_t) result; -} - -inline size_t len_hint(handle h) { -#if PY_VERSION_HEX >= 0x03040000 - ssize_t result = PyObject_LengthHint(h.ptr(), 0); -#else - ssize_t result = PyObject_Length(h.ptr()); -#endif - if (result < 0) { - // Sometimes a length can't be determined at all (eg generators) - // In which case simply return 0 - PyErr_Clear(); - return 0; - } - return (size_t) result; -} - -inline str repr(handle h) { - PyObject *str_value = PyObject_Repr(h.ptr()); - if (!str_value) throw error_already_set(); -#if PY_MAJOR_VERSION < 3 - PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); - Py_XDECREF(str_value); str_value = unicode; - if (!str_value) throw error_already_set(); -#endif - return reinterpret_steal(str_value); -} - -inline iterator iter(handle obj) { - PyObject *result = PyObject_GetIter(obj.ptr()); - if (!result) { throw error_already_set(); } - return reinterpret_steal(result); -} -/// @} python_builtins - -PYBIND11_NAMESPACE_BEGIN(detail) -template iterator object_api::begin() const { return iter(derived()); } -template iterator object_api::end() const { return iterator::sentinel(); } -template item_accessor object_api::operator[](handle key) const { - return {derived(), reinterpret_borrow(key)}; -} -template item_accessor object_api::operator[](const char *key) const { - return {derived(), pybind11::str(key)}; -} -template obj_attr_accessor object_api::attr(handle key) const { - return {derived(), reinterpret_borrow(key)}; -} -template str_attr_accessor object_api::attr(const char *key) const { - return {derived(), key}; -} -template args_proxy object_api::operator*() const { - return args_proxy(derived().ptr()); -} -template template bool object_api::contains(T &&item) const { - return attr("__contains__")(std::forward(item)).template cast(); -} - -template -pybind11::str object_api::str() const { return pybind11::str(derived()); } - -template -str_attr_accessor object_api::doc() const { return attr("__doc__"); } - -template -handle object_api::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); 
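- // Py_TYPE returns a borrowed reference, and handle performs no reference counting, so the type object can be returned directly without an incref.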
} - -template -bool object_api::rich_compare(object_api const &other, int value) const { - int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value); - if (rv == -1) - throw error_already_set(); - return rv == 1; -} - -#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \ - template object object_api::op() const { \ - object result = reinterpret_steal(fn(derived().ptr())); \ - if (!result.ptr()) \ - throw error_already_set(); \ - return result; \ - } - -#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \ - template \ - object object_api::op(object_api const &other) const { \ - object result = reinterpret_steal( \ - fn(derived().ptr(), other.derived().ptr())); \ - if (!result.ptr()) \ - throw error_already_set(); \ - return result; \ - } - -PYBIND11_MATH_OPERATOR_UNARY (operator~, PyNumber_Invert) -PYBIND11_MATH_OPERATOR_UNARY (operator-, PyNumber_Negative) -PYBIND11_MATH_OPERATOR_BINARY(operator+, PyNumber_Add) -PYBIND11_MATH_OPERATOR_BINARY(operator+=, PyNumber_InPlaceAdd) -PYBIND11_MATH_OPERATOR_BINARY(operator-, PyNumber_Subtract) -PYBIND11_MATH_OPERATOR_BINARY(operator-=, PyNumber_InPlaceSubtract) -PYBIND11_MATH_OPERATOR_BINARY(operator*, PyNumber_Multiply) -PYBIND11_MATH_OPERATOR_BINARY(operator*=, PyNumber_InPlaceMultiply) -PYBIND11_MATH_OPERATOR_BINARY(operator/, PyNumber_TrueDivide) -PYBIND11_MATH_OPERATOR_BINARY(operator/=, PyNumber_InPlaceTrueDivide) -PYBIND11_MATH_OPERATOR_BINARY(operator|, PyNumber_Or) -PYBIND11_MATH_OPERATOR_BINARY(operator|=, PyNumber_InPlaceOr) -PYBIND11_MATH_OPERATOR_BINARY(operator&, PyNumber_And) -PYBIND11_MATH_OPERATOR_BINARY(operator&=, PyNumber_InPlaceAnd) -PYBIND11_MATH_OPERATOR_BINARY(operator^, PyNumber_Xor) -PYBIND11_MATH_OPERATOR_BINARY(operator^=, PyNumber_InPlaceXor) -PYBIND11_MATH_OPERATOR_BINARY(operator<<, PyNumber_Lshift) -PYBIND11_MATH_OPERATOR_BINARY(operator<<=, PyNumber_InPlaceLshift) -PYBIND11_MATH_OPERATOR_BINARY(operator>>, PyNumber_Rshift) -PYBIND11_MATH_OPERATOR_BINARY(operator>>=, PyNumber_InPlaceRshift) - -#undef PYBIND11_MATH_OPERATOR_UNARY -#undef PYBIND11_MATH_OPERATOR_BINARY - -PYBIND11_NAMESPACE_END(detail) -PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/spaces/CVPR/LIVE/pybind11/tests/object.h b/spaces/CVPR/LIVE/pybind11/tests/object.h deleted file mode 100644 index 9235f19c20bff3afb59c6880a84c809205eff6ea..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/object.h +++ /dev/null @@ -1,175 +0,0 @@ -#if !defined(__OBJECT_H) -#define __OBJECT_H - -#include -#include "constructor_stats.h" - -/// Reference counted object base class -class Object { -public: - /// Default constructor - Object() { print_default_created(this); } - - /// Copy constructor - Object(const Object &) : m_refCount(0) { print_copy_created(this); } - - /// Return the current reference count - int getRefCount() const { return m_refCount; }; - - /// Increase the object's reference count by one - void incRef() const { ++m_refCount; } - - /** \brief Decrease the reference count of - * the object and possibly deallocate it. - * - * The object will automatically be deallocated once - * the reference count reaches zero. - */ - void decRef(bool dealloc = true) const { - --m_refCount; - if (m_refCount == 0 && dealloc) - delete this; - else if (m_refCount < 0) - throw std::runtime_error("Internal error: reference count < 0!"); - } - - virtual std::string toString() const = 0; -protected: - /** \brief Virtual protected deconstructor. 
- * (Will only be called by \ref ref) - */ - virtual ~Object() { print_destroyed(this); } -private: - mutable std::atomic<int> m_refCount { 0 }; -}; - -// Tag class used to track constructions of ref objects. When we track constructors, below, we -// track and print out the actual class (e.g. ref<int>), and *also* add a fake tracker for -// ref_tag. This lets us check that the total number of ref constructors/destructors is -// correct without having to check each individual ref type individually. -class ref_tag {}; - -/** - * \brief Reference counting helper - * - * The \a ref reference template is a simple wrapper to store a - * pointer to an object. It takes care of increasing and decreasing - * the reference count of the object. When the last reference goes - * out of scope, the associated object will be deallocated. - * - * \ingroup libcore - */ -template <typename T> class ref { -public: - /// Create a nullptr reference - ref() : m_ptr(nullptr) { print_default_created(this); track_default_created((ref_tag*) this); } - - /// Construct a reference from a pointer - ref(T *ptr) : m_ptr(ptr) { - if (m_ptr) ((Object *) m_ptr)->incRef(); - - print_created(this, "from pointer", m_ptr); track_created((ref_tag*) this, "from pointer"); - - } - - /// Copy constructor - ref(const ref &r) : m_ptr(r.m_ptr) { - if (m_ptr) - ((Object *) m_ptr)->incRef(); - - print_copy_created(this, "with pointer", m_ptr); track_copy_created((ref_tag*) this); - } - - /// Move constructor - ref(ref &&r) : m_ptr(r.m_ptr) { - r.m_ptr = nullptr; - - print_move_created(this, "with pointer", m_ptr); track_move_created((ref_tag*) this); - } - - /// Destroy this reference - ~ref() { - if (m_ptr) - ((Object *) m_ptr)->decRef(); - - print_destroyed(this); track_destroyed((ref_tag*) this); - } - - /// Move another reference into the current one - ref& operator=(ref&& r) { - print_move_assigned(this, "pointer", r.m_ptr); track_move_assigned((ref_tag*) this); - - if (*this == r) - return *this; - if (m_ptr) - ((Object *) m_ptr)->decRef(); - m_ptr = r.m_ptr; - r.m_ptr = nullptr; - return *this; - } - - /// Overwrite this reference with another reference - ref& operator=(const ref& r) { - print_copy_assigned(this, "pointer", r.m_ptr); track_copy_assigned((ref_tag*) this); - - if (m_ptr == r.m_ptr) - return *this; - if (m_ptr) - ((Object *) m_ptr)->decRef(); - m_ptr = r.m_ptr; - if (m_ptr) - ((Object *) m_ptr)->incRef(); - return *this; - } - - /// Overwrite this reference with a pointer to another object - ref& operator=(T *ptr) { - print_values(this, "assigned pointer"); track_values((ref_tag*) this, "assigned pointer"); - - if (m_ptr == ptr) - return *this; - if (m_ptr) - ((Object *) m_ptr)->decRef(); - m_ptr = ptr; - if (m_ptr) - ((Object *) m_ptr)->incRef(); - return *this; - } - - /// Compare this reference with another reference - bool operator==(const ref &r) const { return m_ptr == r.m_ptr; } - - /// Compare this reference with another reference - bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; } - - /// Compare this reference with a pointer - bool operator==(const T* ptr) const { return m_ptr == ptr; } - - /// Compare this reference with a pointer - bool operator!=(const T* ptr) const { return m_ptr != ptr; } - - /// Access the object referenced by this reference - T* operator->() { return m_ptr; } - - /// Access the object referenced by this reference - const T* operator->() const { return m_ptr; } - - /// Return a C++ reference to the referenced object - T& operator*() { return *m_ptr; } - - /// Return a const C++ reference to
the referenced object - const T& operator*() const { return *m_ptr; } - - /// Return a pointer to the referenced object - operator T* () { return m_ptr; } - - /// Return a const pointer to the referenced object - T* get_ptr() { return m_ptr; } - - /// Return a pointer to the referenced object - const T* get_ptr() const { return m_ptr; } -private: - T *m_ptr; -}; - -#endif /* __OBJECT_H */ diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators.h b/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators.h deleted file mode 100644 index f86ea20521811911e53812320e134a1e5c68079c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/retina_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/retina_head.py deleted file mode 100644 index b12416fa8332f02b9a04bbfc7926f6d13875e61b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/dense_heads/retina_head.py +++ /dev/null @@ -1,114 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init - -from ..builder import HEADS -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class RetinaHead(AnchorHead): - r"""An anchor-based head used in `RetinaNet - `_. - - The head contains two subnetworks. The first classifies anchor boxes and - the second regresses deltas for the anchors. 
- - Example: - >>> import torch - >>> self = RetinaHead(11, 7) - >>> x = torch.rand(1, 7, 32, 32) - >>> cls_score, bbox_pred = self.forward_single(x) - >>> # Each anchor predicts a score for each class except background - >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors - >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors - >>> assert cls_per_anchor == (self.num_classes) - >>> assert box_per_anchor == 4 - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(RetinaHead, self).__init__( - num_classes, - in_channels, - anchor_generator=anchor_generator, - **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.cls_out_channels, - 3, - padding=1) - self.retina_reg = nn.Conv2d( - self.feat_channels, self.num_anchors * 4, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.retina_cls, std=0.01, bias=bias_cls) - normal_init(self.retina_reg, std=0.01) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale - level, the channels number is num_anchors * 4. - """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_pred = self.retina_reg(reg_feat) - return cls_score, bbox_pred diff --git a/spaces/CVPR/WALT/walt/datasets/coco.py b/spaces/CVPR/WALT/walt/datasets/coco.py deleted file mode 100644 index 5b8b55461b68d0c5c667d9c07503bb98ab14d65b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/walt/datasets/coco.py +++ /dev/null @@ -1,519 +0,0 @@ -__author__ = 'tylin' -__version__ = '2.0' -# Interface for accessing the Microsoft COCO dataset. - -# Microsoft COCO is a large image dataset designed for object detection, -# segmentation, and caption generation. pycocotools is a Python API that -# assists in loading, parsing and visualizing the annotations in COCO. -# Please visit http://mscoco.org/ for more information on COCO, including -# for the data, paper, and tutorials. The exact format of the annotations -# is also described on the COCO website. For example usage of the pycocotools -# please see pycocotools_demo.ipynb. 
In addition to this API, please download -# both the COCO images and annotations in order to run the demo. - -# An alternative to using the API is to load the annotations directly -# into Python dictionary -# Using the API provides additional utility functions. Note that this API -# supports both *instance* and *caption* annotations. In the case of -# captions not all functions are defined (e.g. categories are undefined). - -# The following API functions are defined: -# COCO - COCO api class that loads COCO annotation file and prepare data -# structures. -# decodeMask - Decode binary mask M encoded via run-length encoding. -# encodeMask - Encode binary mask M using run-length encoding. -# getAnnIds - Get ann ids that satisfy given filter conditions. -# getCatIds - Get cat ids that satisfy given filter conditions. -# getImgIds - Get img ids that satisfy given filter conditions. -# loadAnns - Load anns with the specified ids. -# loadCats - Load cats with the specified ids. -# loadImgs - Load imgs with the specified ids. -# annToMask - Convert segmentation in an annotation to binary mask. -# showAnns - Display the specified annotations. -# loadRes - Load algorithm results and create API for accessing them. -# download - Download COCO images from mscoco.org server. -# Throughout the API "ann"=annotation, "cat"=category, and "img"=image. -# Help on each functions can be accessed by: "help COCO>function". - -# See also COCO>decodeMask, -# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, -# COCO>getImgIds, COCO>loadAnns, COCO>loadCats, -# COCO>loadImgs, COCO>annToMask, COCO>showAnns - -# Microsoft COCO Toolbox. version 2.0 -# Data, paper, and tutorials available at: http://mscoco.org/ -# Code written by Piotr Dollar and Tsung-Yi Lin, 2014. -# Licensed under the Simplified BSD License [see bsd.txt] - -import copy -import itertools -import json -import os -import time -from collections import defaultdict -from urllib.request import urlretrieve - -import matplotlib.pyplot as plt -import numpy as np -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon - -from . import mask as maskUtils - - -def _isArrayLike(obj): - return hasattr(obj, '__iter__') and hasattr(obj, '__len__') - - -class COCO: - def __init__(self, annotation_file=None): - """ - Constructor of Microsoft COCO helper class for reading and visualizing - annotations. - :param annotation_file (str): location of annotation file - :param image_folder (str): location to the folder that hosts images. 
- :return: - """ - # load dataset - self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict( - ), dict() - self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) - if annotation_file is not None: - print('loading annotations into memory...') - tic = time.time() - with open(annotation_file, 'r') as f: - dataset = json.load(f) - assert type( - dataset - ) == dict, 'annotation file format {} not supported'.format( - type(dataset)) - print('Done (t={:0.2f}s)'.format(time.time() - tic)) - self.dataset = dataset - self.createIndex() - self.img_ann_map = self.imgToAnns - self.cat_img_map = self.catToImgs - - def createIndex(self): - # create index - print('creating index...') - anns, cats, imgs = {}, {}, {} - imgToAnns, catToImgs = defaultdict(list), defaultdict(list) - if 'annotations' in self.dataset: - for ann in self.dataset['annotations']: - imgToAnns[ann['image_id']].append(ann) - anns[ann['id']] = ann - - if 'images' in self.dataset: - for img in self.dataset['images']: - imgs[img['id']] = img - - if 'categories' in self.dataset: - for cat in self.dataset['categories']: - cats[cat['id']] = cat - - if 'annotations' in self.dataset and 'categories' in self.dataset: - for ann in self.dataset['annotations']: - catToImgs[ann['category_id']].append(ann['image_id']) - - print('index created!') - - # create class members - self.anns = anns - self.imgToAnns = imgToAnns - self.catToImgs = catToImgs - self.imgs = imgs - self.cats = cats - - def info(self): - """ - Print information about the annotation file. - :return: - """ - for key, value in self.dataset['info'].items(): - print('{}: {}'.format(key, value)) - - def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): - """ - Get ann ids that satisfy given filter conditions. default skips that - filter - :param imgIds (int array) : get anns for given imgs - catIds (int array) : get anns for given cats - areaRng (float array) : get anns for given area range - (e.g. [0 inf]) - iscrowd (boolean) : get anns for given crowd label - (False or True) - :return: ids (int array) : integer array of ann ids - """ - imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] - catIds = catIds if _isArrayLike(catIds) else [catIds] - - if len(imgIds) == len(catIds) == len(areaRng) == 0: - anns = self.dataset['annotations'] - else: - if not len(imgIds) == 0: - lists = [ - self.imgToAnns[imgId] for imgId in imgIds - if imgId in self.imgToAnns - ] - anns = list(itertools.chain.from_iterable(lists)) - else: - anns = self.dataset['annotations'] - anns = anns if len(catIds) == 0 else [ - ann for ann in anns if ann['category_id'] in catIds - ] - anns = anns if len(areaRng) == 0 else [ - ann for ann in anns - if ann['area'] > areaRng[0] and ann['area'] < areaRng[1] - ] - if iscrowd is not None: - ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] - else: - ids = [ann['id'] for ann in anns] - return ids - - def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None): - return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd) - - def getCatIds(self, catNms=[], supNms=[], catIds=[]): - """ - filtering parameters. default skips that filter. 
- :param catNms (str array) : get cats for given cat names - :param supNms (str array) : get cats for given supercategory names - :param catIds (int array) : get cats for given cat ids - :return: ids (int array) : integer array of cat ids - """ - catNms = catNms if _isArrayLike(catNms) else [catNms] - supNms = supNms if _isArrayLike(supNms) else [supNms] - catIds = catIds if _isArrayLike(catIds) else [catIds] - - if len(catNms) == len(supNms) == len(catIds) == 0: - cats = self.dataset['categories'] - else: - cats = self.dataset['categories'] - cats = cats if len(catNms) == 0 else [ - cat for cat in cats if cat['name'] in catNms - ] - cats = cats if len(supNms) == 0 else [ - cat for cat in cats if cat['supercategory'] in supNms - ] - cats = cats if len(catIds) == 0 else [ - cat for cat in cats if cat['id'] in catIds - ] - ids = [cat['id'] for cat in cats] - return ids - - def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]): - return self.getCatIds(cat_names, sup_names, cat_ids) - - def getImgIds(self, imgIds=[], catIds=[]): - ''' - Get img ids that satisfy given filter conditions. - :param imgIds (int array) : get imgs for given ids - :param catIds (int array) : get imgs with all given cats - :return: ids (int array) : integer array of img ids - ''' - imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] - catIds = catIds if _isArrayLike(catIds) else [catIds] - - if len(imgIds) == len(catIds) == 0: - ids = self.imgs.keys() - else: - ids = set(imgIds) - for i, catId in enumerate(catIds): - if i == 0 and len(ids) == 0: - ids = set(self.catToImgs[catId]) - else: - ids &= set(self.catToImgs[catId]) - return list(ids) - - def get_img_ids(self, img_ids=[], cat_ids=[]): - return self.getImgIds(img_ids, cat_ids) - - def loadAnns(self, ids=[]): - """ - Load anns with the specified ids. - :param ids (int array) : integer ids specifying anns - :return: anns (object array) : loaded ann objects - """ - if _isArrayLike(ids): - return [self.anns[id] for id in ids] - elif type(ids) == int: - return [self.anns[ids]] - - load_anns = loadAnns - - def loadCats(self, ids=[]): - """ - Load cats with the specified ids. - :param ids (int array) : integer ids specifying cats - :return: cats (object array) : loaded cat objects - """ - if _isArrayLike(ids): - return [self.cats[id] for id in ids] - elif type(ids) == int: - return [self.cats[ids]] - - load_cats = loadCats - - def loadImgs(self, ids=[]): - """ - Load anns with the specified ids. - :param ids (int array) : integer ids specifying img - :return: imgs (object array) : loaded img objects - """ - if _isArrayLike(ids): - return [self.imgs[id] for id in ids] - elif type(ids) == int: - return [self.imgs[ids]] - - load_imgs = loadImgs - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. 
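- (A sketch of typical usage: render the image first with plt.imshow(img), then call coco.showAnns(coco.loadAnns(coco.getAnnIds(imgIds=[img_id]))) to overlay the annotations on the current matplotlib axes.)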
- :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if 'segmentation' in anns[0] or 'keypoints' in anns[0]: - datasetType = 'instances' - elif 'caption' in anns[0]: - datasetType = 'captions' - else: - raise Exception('datasetType not supported') - if datasetType == 'instances': - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if 'segmentation' in ann: - if type(ann['segmentation']) == list: - # polygon - for seg in ann['segmentation']: - poly = np.array(seg).reshape( - (int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann['image_id']] - if type(ann['segmentation']['counts']) == list: - rle = maskUtils.frPyObjects([ann['segmentation']], - t['height'], - t['width']) - else: - rle = [ann['segmentation']] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann['iscrowd'] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann['iscrowd'] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if 'keypoints' in ann and type(ann['keypoints']) == list: - # turn skeleton into zero-based index - sks = np.array( - self.loadCats(ann['category_id'])[0]['skeleton']) - 1 - kp = np.array(ann['keypoints']) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot(x[v > 0], - y[v > 0], - 'o', - markersize=8, - markerfacecolor=c, - markeredgecolor='k', - markeredgewidth=2) - plt.plot(x[v > 1], - y[v > 1], - 'o', - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox'] - poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y]] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - p = PatchCollection(polygons, - facecolor=color, - linewidths=0, - alpha=0.4) - ax.add_collection(p) - p = PatchCollection(polygons, - facecolor='none', - edgecolors=color, - linewidths=2) - ax.add_collection(p) - elif datasetType == 'captions': - for ann in anns: - print(ann['caption']) - - def loadRes(self, resFile): - """ - Load result file and return a result api object. 
- :param resFile (str) : file name of result file - :return: res (obj) : result api object - """ - res = COCO() - res.dataset['images'] = [img for img in self.dataset['images']] - - print('Loading and preparing results...') - tic = time.time() - if type(resFile) == str: - with open(resFile) as f: - anns = json.load(f) - elif type(resFile) == np.ndarray: - anns = self.loadNumpyAnnotations(resFile) - else: - anns = resFile - assert type(anns) == list, 'results in not an array of objects' - annsImgIds = [ann['image_id'] for ann in anns] - assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ - 'Results do not correspond to current coco set' - if 'caption' in anns[0]: - imgIds = set([img['id'] for img in res.dataset['images']]) & set( - [ann['image_id'] for ann in anns]) - res.dataset['images'] = [ - img for img in res.dataset['images'] if img['id'] in imgIds - ] - for id, ann in enumerate(anns): - ann['id'] = id + 1 - elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: - res.dataset['categories'] = copy.deepcopy( - self.dataset['categories']) - for id, ann in enumerate(anns): - bb = ann['bbox'] - x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]] - if 'segmentation' not in ann: - ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] - ann['area'] = bb[2] * bb[3] - ann['id'] = id + 1 - ann['iscrowd'] = 0 - elif 'segmentation' in anns[0]: - res.dataset['categories'] = copy.deepcopy( - self.dataset['categories']) - for id, ann in enumerate(anns): - # now only support compressed RLE format as segmentation - # results - ann['area'] = maskUtils.area(ann['segmentation']) - if 'bbox' not in ann: - ann['bbox'] = maskUtils.toBbox(ann['segmentation']) - ann['id'] = id + 1 - ann['iscrowd'] = 0 - elif 'keypoints' in anns[0]: - res.dataset['categories'] = copy.deepcopy( - self.dataset['categories']) - for id, ann in enumerate(anns): - s = ann['keypoints'] - x = s[0::3] - y = s[1::3] - x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y) - ann['area'] = (x1 - x0) * (y1 - y0) - ann['id'] = id + 1 - ann['bbox'] = [x0, y0, x1 - x0, y1 - y0] - print('DONE (t={:0.2f}s)'.format(time.time() - tic)) - - res.dataset['annotations'] = anns - res.createIndex() - return res - - def download(self, tarDir=None, imgIds=[]): - ''' - Download COCO images from mscoco.org server. 
- :param tarDir (str): COCO results directory name - imgIds (list): images to be downloaded - :return: - ''' - if tarDir is None: - print('Please specify target directory') - return -1 - if len(imgIds) == 0: - imgs = self.imgs.values() - else: - imgs = self.loadImgs(imgIds) - N = len(imgs) - if not os.path.exists(tarDir): - os.makedirs(tarDir) - for i, img in enumerate(imgs): - tic = time.time() - fname = os.path.join(tarDir, img['file_name']) - if not os.path.exists(fname): - urlretrieve(img['coco_url'], fname) - print('downloaded {}/{} images (t={:0.1f}s)'.format( - i, N, - time.time() - tic)) - - def loadNumpyAnnotations(self, data): - """ - Convert result data from a numpy array [Nx7] where each row contains - {imageID,x1,y1,w,h,score,class} - :param data (numpy.ndarray) - :return: annotations (python nested list) - """ - print('Converting ndarray to lists...') - assert (type(data) == np.ndarray) - print(data.shape) - assert (data.shape[1] == 7) - N = data.shape[0] - ann = [] - for i in range(N): - if i % 1000000 == 0: - print('{}/{}'.format(i, N)) - ann += [{ - 'image_id': int(data[i, 0]), - 'bbox': [data[i, 1], data[i, 2], data[i, 3], data[i, 4]], - 'score': data[i, 5], - 'category_id': int(data[i, 6]), - }] - return ann - - def annToRLE(self, ann): - """ - Convert annotation which can be polygons, uncompressed RLE to RLE. - :return: binary mask (numpy 2D array) - """ - t = self.imgs[ann['image_id']] - h, w = t['height'], t['width'] - segm = ann['segmentation'] - if type(segm) == list: - # polygon -- a single object might consist of multiple parts - # we merge all parts into one mask rle code - rles = maskUtils.frPyObjects(segm, h, w) - rle = maskUtils.merge(rles) - elif type(segm['counts']) == list: - # uncompressed RLE - rle = maskUtils.frPyObjects(segm, h, w) - else: - # rle - rle = ann['segmentation'] - return rle - - ann_to_rle = annToRLE - - def annToMask(self, ann): - """ - Convert annotation which can be polygons, uncompressed RLE, or RLE to - binary mask. - :return: binary mask (numpy 2D array) - """ - rle = self.annToRLE(ann) - m = maskUtils.decode(rle) - return m - - ann_to_mask = annToMask diff --git a/spaces/CVPR/drawings-to-human/frontend/src/app.html b/spaces/CVPR/drawings-to-human/frontend/src/app.html deleted file mode 100644 index bdf448af4caa312d7b281fd38347e33ee406443c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/drawings-to-human/frontend/src/app.html +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - %sveltekit.head% - - - %sveltekit.body% - - diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/transforms/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/data/transforms/__init__.py deleted file mode 100644 index ab3c63b5b456a7fb878757e25768a3634f76ae5b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/transforms/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
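-# A sketch of how the augmentation API re-exported below is typically used,
-# following detectron2's documented transforms workflow (all names are from
-# that public API):
-#   import detectron2.data.transforms as T
-#   augs = T.AugmentationList([T.RandomBrightness(0.9, 1.1), T.RandomFlip(prob=0.5)])
-#   aug_input = T.AugInput(image)   # image: an HWC ndarray
-#   tfms = augs(aug_input)          # applied in place; returns the applied transforms
-#   image_transformed = aug_input.image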
-from fvcore.transforms.transform import Transform, TransformList # order them first -from fvcore.transforms.transform import * -from .transform import * -from .augmentation import * -from .augmentation_impl import * - -__all__ = [k for k in globals().keys() if not k.startswith("_")] - - -from detectron2.utils.env import fixup_module_metadata - -fixup_module_metadata(__name__, globals(), __all__) -del fixup_module_metadata diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/mmdet_wrapper.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/mmdet_wrapper.py deleted file mode 100644 index df0fe2c6f070e7a63c1b9a0464ce81e9160c12b3..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/modeling/mmdet_wrapper.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import logging -import numpy as np -from collections import OrderedDict -from collections.abc import Mapping -from typing import Dict, List, Optional, Tuple, Union -import torch -from omegaconf import DictConfig, OmegaConf -from torch import Tensor, nn - -from detectron2.layers import ShapeSpec -from detectron2.structures import BitMasks, Boxes, ImageList, Instances -from detectron2.utils.events import get_event_storage - -from .backbone import Backbone - -logger = logging.getLogger(__name__) - - -def _to_container(cfg): - """ - mmdet will assert the type of dict/list. - So convert omegaconf objects to dict/list. - """ - if isinstance(cfg, DictConfig): - cfg = OmegaConf.to_container(cfg, resolve=True) - from mmcv.utils import ConfigDict - - return ConfigDict(cfg) - - -class MMDetBackbone(Backbone): - """ - Wrapper of mmdetection backbones to use in detectron2. - - mmdet backbones produce list/tuple of tensors, while detectron2 backbones - produce a dict of tensors. This class wraps the given backbone to produce - output in detectron2's convention, so it can be used in place of detectron2 - backbones. - """ - - def __init__( - self, - backbone: Union[nn.Module, Mapping], - neck: Union[nn.Module, Mapping, None] = None, - *, - pretrained_backbone: Optional[str] = None, - output_shapes: List[ShapeSpec], - output_names: Optional[List[str]] = None, - ): - """ - Args: - backbone: either a backbone module or a mmdet config dict that defines a - backbone. The backbone takes a 4D image tensor and returns a - sequence of tensors. - neck: either a backbone module or a mmdet config dict that defines a - neck. The neck takes outputs of backbone and returns a - sequence of tensors. If None, no neck is used. - pretrained_backbone: defines the backbone weights that can be loaded by - mmdet, such as "torchvision://resnet50". - output_shapes: shape for every output of the backbone (or neck, if given). - stride and channels are often needed. - output_names: names for every output of the backbone (or neck, if given). - By default, will use "out0", "out1", ... - """ - super().__init__() - if isinstance(backbone, Mapping): - from mmdet.models import build_backbone - - backbone = build_backbone(_to_container(backbone)) - self.backbone = backbone - - if isinstance(neck, Mapping): - from mmdet.models import build_neck - - neck = build_neck(_to_container(neck)) - self.neck = neck - - # It's confusing that backbone weights are given as a separate argument, - # but "neck" weights, if any, are part of neck itself. This is the interface - # of mmdet so we follow it. 
Reference: - # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py - logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") - self.backbone.init_weights(pretrained_backbone) - # train() in mmdet modules is non-trivial, and has to be explicitly - # called. Reference: - # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py - self.backbone.train() - if self.neck is not None: - logger.info("Initializing mmdet neck weights ...") - if isinstance(self.neck, nn.Sequential): - for m in self.neck: - m.init_weights() - else: - self.neck.init_weights() - self.neck.train() - - self._output_shapes = output_shapes - if not output_names: - output_names = [f"out{i}" for i in range(len(output_shapes))] - self._output_names = output_names - - def forward(self, x) -> Dict[str, Tensor]: - outs = self.backbone(x) - if self.neck is not None: - outs = self.neck(outs) - assert isinstance( - outs, (list, tuple) - ), "mmdet backbone should return a list/tuple of tensors!" - if len(outs) != len(self._output_shapes): - raise ValueError( - "Length of output_shapes does not match outputs from the mmdet backbone: " - f"{len(outs)} != {len(self._output_shapes)}" - ) - return {k: v for k, v in zip(self._output_names, outs)} - - def output_shape(self) -> Dict[str, ShapeSpec]: - return {k: v for k, v in zip(self._output_names, self._output_shapes)} - - -class MMDetDetector(nn.Module): - """ - Wrapper of a mmdetection detector model, for detection and instance segmentation. - Input/output formats of this class follow detectron2's convention, so a - mmdetection model can be trained and evaluated in detectron2. - """ - - def __init__( - self, - detector: Union[nn.Module, Mapping], - *, - # Default is 32 regardless of model: - # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets - size_divisibility=32, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - ): - """ - Args: - detector: a mmdet detector, or a mmdet config dict that defines a detector. - size_divisibility: pad input images to multiple of this number - pixel_mean: per-channel mean to normalize input image - pixel_std: per-channel stddev to normalize input image - """ - super().__init__() - if isinstance(detector, Mapping): - from mmdet.models import build_detector - - detector = build_detector(_to_container(detector)) - self.detector = detector - self.size_divisibility = size_divisibility - - self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) - assert ( - self.pixel_mean.shape == self.pixel_std.shape - ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" 
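- # The third positional argument to register_buffer above is persistent=False: - # the normalization buffers follow .to(device) moves but are excluded from the state_dict.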
- - def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor - metas = [] - rescale = {"height" in x for x in batched_inputs} - if len(rescale) != 1: - raise ValueError("Some inputs have original height/width, but some don't!") - rescale = list(rescale)[0] - output_shapes = [] - for input in batched_inputs: - meta = {} - c, h, w = input["image"].shape - meta["img_shape"] = meta["ori_shape"] = (h, w, c) - if rescale: - scale_factor = np.array( - [w / input["width"], h / input["height"]] * 2, dtype="float32" - ) - ori_shape = (input["height"], input["width"]) - output_shapes.append(ori_shape) - meta["ori_shape"] = ori_shape + (c,) - else: - scale_factor = 1.0 - output_shapes.append((h, w)) - meta["scale_factor"] = scale_factor - meta["flip"] = False - padh, padw = images.shape[-2:] - meta["pad_shape"] = (padh, padw, c) - metas.append(meta) - - if self.training: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - if gt_instances[0].has("gt_masks"): - from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks - - def convert_mask(m, shape): - # mmdet mask format - if isinstance(m, BitMasks): - return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) - else: - return mm_PolygonMasks(m.polygons, shape[0], shape[1]) - - gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] - losses_and_metrics = self.detector.forward_train( - images, - metas, - [x.gt_boxes.tensor for x in gt_instances], - [x.gt_classes for x in gt_instances], - gt_masks=gt_masks, - ) - else: - losses_and_metrics = self.detector.forward_train( - images, - metas, - [x.gt_boxes.tensor for x in gt_instances], - [x.gt_classes for x in gt_instances], - ) - return _parse_losses(losses_and_metrics) - else: - results = self.detector.simple_test(images, metas, rescale=rescale) - results = [ - {"instances": _convert_mmdet_result(r, shape)} - for r, shape in zip(results, output_shapes) - ] - return results - - @property - def device(self): - return self.pixel_mean.device - - -# Reference: show_result() in -# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py -def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] - else: - bbox_result, segm_result = result, None - - bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 - bboxes, scores = bboxes[:, :4], bboxes[:, -1] - labels = [ - torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) - ] - labels = torch.cat(labels) - inst = Instances(shape) - inst.pred_boxes = Boxes(bboxes) - inst.scores = scores - inst.pred_classes = labels - - if segm_result is not None and len(labels) > 0: - segm_result = list(itertools.chain(*segm_result)) - segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] - segm_result = torch.stack(segm_result, dim=0) - inst.pred_masks = segm_result - return inst - - -# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py -def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: - log_vars = OrderedDict() - for loss_name, loss_value in losses.items(): - if 
isinstance(loss_value, torch.Tensor): - log_vars[loss_name] = loss_value.mean() - elif isinstance(loss_value, list): - log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) - else: - raise TypeError(f"{loss_name} is not a tensor or list of tensors") - - if "loss" not in loss_name: - # put metrics to storage; don't return them - storage = get_event_storage() - value = log_vars.pop(loss_name).cpu().item() - storage.put_scalar(loss_name, value) - return log_vars diff --git a/spaces/Candyraider/Proxy4/Dockerfile b/spaces/Candyraider/Proxy4/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Candyraider/Proxy4/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/pinecone.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/pinecone.py deleted file mode 100644 index 27fcd62482d0cf44e02fa1c339195be58cb745b0..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/pinecone.py +++ /dev/null @@ -1,75 +0,0 @@ -import pinecone -from colorama import Fore, Style - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.logs import logger -from autogpt.memory.base import MemoryProviderSingleton - - -class PineconeMemory(MemoryProviderSingleton): - def __init__(self, cfg): - pinecone_api_key = cfg.pinecone_api_key - pinecone_region = cfg.pinecone_region - pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) - dimension = 1536 - metric = "cosine" - pod_type = "p1" - table_name = "auto-gpt" - # this assumes we don't start with memory. - # for now this works. - # we'll need a more complicated and robust system if we want to start with - # memory. - self.vec_num = 0 - - try: - pinecone.whoami() - except Exception as e: - logger.typewriter_log( - "FAILED TO CONNECT TO PINECONE", - Fore.RED, - Style.BRIGHT + str(e) + Style.RESET_ALL, - ) - logger.double_check( - "Please ensure you have setup and configured Pinecone properly for use." - + f"You can check out {Fore.CYAN + Style.BRIGHT}" - "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" - f"{Style.RESET_ALL} to ensure you've set up everything correctly." - ) - exit(1) - - if table_name not in pinecone.list_indexes(): - pinecone.create_index( - table_name, dimension=dimension, metric=metric, pod_type=pod_type - ) - self.index = pinecone.Index(table_name) - - def add(self, data): - vector = create_embedding_with_ada(data) - # no metadata here. We may wish to change that long term. - self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) - _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" - self.vec_num += 1 - return _text - - def get(self, data): - return self.get_relevant(data, 1) - - def clear(self): - self.index.delete(deleteAll=True) - return "Obliviated" - - def get_relevant(self, data, num_relevant=5): - """ - Returns all the data in the memory that is relevant to the given data. - :param data: The data to compare to. - :param num_relevant: The number of relevant data to return. 
Defaults to 5 - """ - query_embedding = create_embedding_with_ada(data) - results = self.index.query( - query_embedding, top_k=num_relevant, include_metadata=True - ) - sorted_results = sorted(results.matches, key=lambda x: x.score) - return [str(item["metadata"]["raw_text"]) for item in sorted_results] - - def get_stats(self): - return self.index.describe_index_stats() diff --git a/spaces/Chujinze/Res2Net/detect.py b/spaces/Chujinze/Res2Net/detect.py deleted file mode 100644 index f788c9a5b0b07e681616e9fbf5fcea7f4404f673..0000000000000000000000000000000000000000 --- a/spaces/Chujinze/Res2Net/detect.py +++ /dev/null @@ -1,332 +0,0 @@ -import gradio as gr -import torch.nn as nn -import math -import torch.utils.model_zoo as model_zoo -import torch -import torch.nn.functional as F - -__all__ = ['Res2Net', 'res2net50_v1b', 'res2net101_v1b'] - -model_urls = { - 'res2net50_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net50_v1b_26w_4s-3cf99910.pth', - 'res2net101_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net101_v1b_26w_4s-0812c246.pth', -} - - -class Bottle2neck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'): - """ Constructor - Args: - inplanes: input channel dimensionality - planes: output channel dimensionality - stride: conv stride. Replaces pooling layer. - downsample: None when stride = 1 - baseWidth: basic width of conv3x3 - scale: number of scale. - type: 'normal': normal set. 'stage': first block of a new stage. - """ - super(Bottle2neck, self).__init__() - - width = int(math.floor(planes * (baseWidth / 64.0))) - self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(width * scale) - - if scale == 1: - self.nums = 1 - else: - self.nums = scale - 1 - if stype == 'stage': - self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) - convs = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False)) - bns.append(nn.BatchNorm2d(width)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stype = stype - self.scale = scale - self.width = width - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i == 0 or self.stype == 'stage': - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp) - sp = self.relu(self.bns[i](sp)) - if i == 0: - out = sp - else: - out = torch.cat((out, sp), 1) - if self.scale != 1 and self.stype == 'normal': - out = torch.cat((out, spx[self.nums]), 1) - elif self.scale != 1 and self.stype == 'stage': - out = torch.cat((out, self.pool(spx[self.nums])), 1) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Res2Net(nn.Module): - - def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000): - self.inplanes = 64 - super(Res2Net, self).__init__() - self.baseWidth = baseWidth - self.scale = scale - self.conv1 = nn.Sequential( - nn.Conv2d(3, 32, 3, 2, 1, bias=False), - 
nn.BatchNorm2d(32),
-            nn.ReLU(inplace=True),
-            nn.Conv2d(32, 32, 3, 1, 1, bias=False),
-            nn.BatchNorm2d(32),
-            nn.ReLU(inplace=True),
-            nn.Conv2d(32, 64, 3, 1, 1, bias=False)
-        )
-        self.bn1 = nn.BatchNorm2d(64)
-        self.relu = nn.ReLU()
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-        self.layer1 = self._make_layer(block, 64, layers[0])
-        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
-        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
-        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
-        self.avgpool = nn.AdaptiveAvgPool2d(1)
-        self.fc = nn.Linear(512 * block.expansion, num_classes)
-
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-            elif isinstance(m, nn.BatchNorm2d):
-                nn.init.constant_(m.weight, 1)
-                nn.init.constant_(m.bias, 0)
-
-    def _make_layer(self, block, planes, blocks, stride=1):
-        downsample = None
-        if stride != 1 or self.inplanes != planes * block.expansion:
-            downsample = nn.Sequential(
-                nn.AvgPool2d(kernel_size=stride, stride=stride,
-                             ceil_mode=True, count_include_pad=False),
-                nn.Conv2d(self.inplanes, planes * block.expansion,
-                          kernel_size=1, stride=1, bias=False),
-                nn.BatchNorm2d(planes * block.expansion),
-            )
-
-        layers = []
-        layers.append(block(self.inplanes, planes, stride, downsample=downsample,
-                            stype='stage', baseWidth=self.baseWidth, scale=self.scale))
-        self.inplanes = planes * block.expansion
-        for i in range(1, blocks):
-            layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth, scale=self.scale))
-
-        return nn.Sequential(*layers)
-
-    def forward(self, x):
-        x = self.conv1(x)
-        x = self.bn1(x)
-        x = self.relu(x)
-        x = self.maxpool(x)
-
-        x = self.layer1(x)
-        x = self.layer2(x)
-        x = self.layer3(x)
-        x = self.layer4(x)
-
-        x = self.avgpool(x)
-        x = x.view(x.size(0), -1)
-        x = self.fc(x)
-
-        return x
-
-
-def res2net50_v1b(pretrained=False, **kwargs):
-    """Constructs a Res2Net-50_v1b model.
-    Res2Net-50 refers to the Res2Net-50_v1b_26w_4s.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
-    if pretrained:
-        model.load_state_dict(model_zoo.load_url(model_urls['res2net50_v1b_26w_4s']))
-    return model
-
-
-def res2net101_v1b(pretrained=False, **kwargs):
-    """Constructs a Res2Net-101_v1b model.
-    Res2Net-101 refers to the Res2Net-101_v1b_26w_4s.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
-    if pretrained:
-        model.load_state_dict(model_zoo.load_url(model_urls['res2net101_v1b_26w_4s']))
-    return model
-
-
-def res2net50_v1b_26w_4s(pretrained=False, **kwargs):
-    """Constructs a Res2Net-50_v1b_26w_4s model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
-    if pretrained:
-        model.load_state_dict(torch.load(pthfile, map_location='cpu'))  # load local weights (pthfile is defined below)
-    return model
-
-
-def res2net101_v1b_26w_4s(pretrained=False, **kwargs):
-    """Constructs a Res2Net-101_v1b_26w_4s model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
-    if pretrained:
-        model.load_state_dict(model_zoo.load_url(model_urls['res2net101_v1b_26w_4s']))
-    return model
-
-
-def res2net152_v1b_26w_4s(pretrained=False, **kwargs):
-    """Constructs a Res2Net-152_v1b_26w_4s model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)
-    if pretrained:
-        # NOTE: 'res2net152_v1b_26w_4s' has no entry in model_urls above, so this
-        # branch raises a KeyError; a valid URL would need to be added first.
-        model.load_state_dict(model_zoo.load_url(model_urls['res2net152_v1b_26w_4s']))
-    return model
-
-
-class mutil_model(nn.Module):
-
-    def __init__(self, category_num=10):
-        super(mutil_model, self).__init__()
-        self.model1 = res2net50_v1b_26w_4s(pretrained=False)
-        self.model1.fc = nn.Sequential(
-            nn.Linear(in_features=2048, out_features=category_num, bias=True),
-        )
-        self.model2 = torch.load('./enet_b2_8' + '.pt', map_location=torch.device('cpu'))
-        self.model2.classifier = nn.Sequential(
-            nn.Linear(in_features=1408, out_features=category_num, bias=True),
-        )
-        self.fc = nn.Linear(in_features=category_num * 2, out_features=category_num, bias=True)
-
-    def forward(self, x):
-        x1 = self.model1(x)
-        x2 = self.model2(x)
-        x = torch.cat((x1, x2), 1)
-        x = self.fc(x)
-        return x
-
-
-pth_path = 'G:/morror art/image_loader10new_model.pt'
-category_num = 10
-
-# "cuda" only when GPUs are available.
-#device = "cuda" if torch.cuda.is_available() else "cpu"
-device = "cpu"
-# Initialize a model, and put it on the device specified.
-# Load the pre-trained Res2Net weights
-pthfile = 'G:/morror art/pre_train_model/res2net50_v1b.pth'
-model = res2net50_v1b_26w_4s(pretrained=False)
-# Replace the fully connected layer so its output dimension matches the number of predicted classes
-num_ftrs = model.fc.in_features
-model.fc = nn.Sequential(
-    nn.Linear(in_features=2048, out_features=1000, bias=True),
-    nn.Dropout(0.5),
-    nn.Linear(1000, out_features=category_num)
-)
-# NOTE: this second assignment overrides the head defined just above
-model.fc = nn.Sequential(
-    nn.Linear(in_features=2048, out_features=category_num, bias=True),
-)
-
-model = model.to(device)
-model.device = device
-model.load_state_dict(torch.load(pth_path, map_location=torch.device('cpu')))
-model.eval()
-
-
-# Optionally add the face-emotion recognition model
-#model = mutil_model(category_num=7)
-#model_state = torch.load('./add_face_emotion_model_7.pt', map_location=torch.device('cpu')).state_dict()
-#model.load_state_dict(model_state)  # load model parameters
-#model.eval()
-
-# music-genre labels: Chinese style, classical, electronic, rock, country,
-# rap, folk, anime, light music, children's songs
-labels = ['中国风', '古典', '电子', '摇滚', '乡村', '说唱', '民谣', '二次元', '轻音乐', '儿歌']
-
-import requests
-import torch
-
-import gradio as gr
-import torchvision.transforms as transforms
-
-# import cv2
-# from PIL import Image
-# PIL
-# from PIL import Image
-# inception_net = tf.keras.applications.MobileNetV2()  # load the model
-
-# Download human-readable labels for ImageNet.
-# response = requests.get("https://git.io/JJkYN") -# labels = response.text.split("\n") -print(len(labels)) - - -def classify_image(inp): - # inp = inp.convert('RGB') - # inp = Image.fromarray(inp.astype('uint8'), 'RGB') - transform_test = transforms.Compose([ - # transforms.ToPILImage(), - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize((0.485, 0.456, 0.406), - (0.229, 0.224, 0.225)), - ]) - inp = transform_test(inp) - print(inp) - with torch.no_grad(): - prediction = model(torch.unsqueeze(inp, 0)).flatten() - print(prediction) - prediction = torch.nn.Softmax(dim=0)(prediction) - print(prediction) - return {labels[i]: float(prediction[i].item()) for i in range(len(labels))} - - -# print(classify_image("/jj.jpg")) -# image = gr.inputs.Image(shape=(256, 256)) -# image = gr.inputs.Image() -# print(image) -# label = gr.outputs.Label(num_top_classes=6) - -gr.Interface( - classify_image, - # gr.inputs.Image(), - gr.inputs.Image(type='pil'), - outputs='label' - # inputs='image', - # outputs='label', - # examples=[["images/cheetah1.jpg"], ["images/lion.jpg"]], -).launch(share=True) -# share=True \ No newline at end of file diff --git a/spaces/CjangCjengh/Sanskrit-TTS/models.py b/spaces/CjangCjengh/Sanskrit-TTS/models.py deleted file mode 100644 index 0a722b1a69fa5b5bd96da7cf225664df181cd027..0000000000000000000000000000000000000000 --- a/spaces/CjangCjengh/Sanskrit-TTS/models.py +++ /dev/null @@ -1,535 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - if self.n_vocab!=0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - if self.n_vocab!=0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - 
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 
41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * 
logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/commons.py b/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = 
get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2, 3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1.0 / norm_type)
-    return total_norm
diff --git a/spaces/CofAI/chat.b4/g4f/utils.py b/spaces/CofAI/chat.b4/g4f/utils.py
deleted file mode 100644
index d5ab41c79b44ab81e1843d209cb342bd83dafb42..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/g4f/utils.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import browser_cookie3
-
-
-class Utils:
-    browsers = [
-        browser_cookie3.chrome,    # 62.74% market share
-        browser_cookie3.safari,    # 24.12% market share
-        browser_cookie3.firefox,   #  4.56% market share
-        browser_cookie3.edge,      #  2.85% market share
-        browser_cookie3.opera,     #  1.69% market share
-        browser_cookie3.brave,     #  0.96% market share
-        browser_cookie3.opera_gx,  #  0.64% market share
-        browser_cookie3.vivaldi,   #  0.32% market share
-    ]
-
-    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
-        cookies = {}
-
-        if setBrowser != False:
-            for browser in Utils.browsers:
-                if browser.__name__ == setBrowser:
-                    try:
-                        for c in browser(domain_name=domain):
-                            if c.name not in cookies:
-                                cookies = cookies | {c.name: c.value}
-
-                    except Exception as e:
-                        pass
-
-        else:
-            for browser in Utils.browsers:
-                try:
-                    for c in browser(domain_name=domain):
-                        if c.name not in cookies:
-                            cookies = cookies | {c.name: c.value}
-
-                except Exception as e:
-                    pass
-
-        if setName:
-            try:
-                return {setName: cookies[setName]}
-
-            except KeyError:  # a missing key raises KeyError, not ValueError
-                print(f'Error: could not find {setName} cookie in any browser.')
-                exit(1)
-
-        else:
-            return cookies
diff --git a/spaces/CofAI/chat/client/css/main.css b/spaces/CofAI/chat/client/css/main.css
deleted file mode 100644
index ec1f1dd80247747912e1976413a1e3897f1308db..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat/client/css/main.css
+++ /dev/null
@@ -1,14 +0,0 @@
-.main-container {
-    display: flex;
-    padding: var(--section-gap);
-    height: 100vh;
-    justify-content: center;
-    box-sizing: border-box;
-}
-
-@media screen and (max-width: 360px) {
-    .main-container {
-        padding: 0px;
-        height: 90vh;
-    }
-}
\ No newline at end of file
diff --git a/spaces/Cong723/gpt-academic-public/docs/README_EN.md b/spaces/Cong723/gpt-academic-public/docs/README_EN.md
deleted file mode 100644
index db214f5327b8cdcd84ed1c57390c3b24ba83d78f..0000000000000000000000000000000000000000
--- a/spaces/Cong723/gpt-academic-public/docs/README_EN.md
+++ /dev/null
@@ -1,291 +0,0 @@
-> **Note**
->
-> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
->
-
-# ChatGPT Academic Optimization
-
-**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a [README in English](docs/README_EN.md) translated by this project itself.**
-
-> **Note**
->
-> 1. Please note that only **functions marked in red** support reading files, and some functions are located in the **dropdown menu** of plugins. Additionally, we welcome any new plugin PRs and handle them with the **highest priority**!
->
-> 2. The functionality of each file in this project is detailed in the project's self-translation report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As the version iterates, you can also click the relevant function plugins at any time to call GPT to regenerate the project's self-analysis report. Frequently asked questions are summarized in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
->
-
-
-Function | Description
---- | ---
-One-Click Polishing | One-click polishing and grammar checking for academic papers.
-One-Click Chinese-English Translation | One-click translation between Chinese and English.
-One-Click Code Explanation | Displays and explains code correctly.
-[Custom Shortcut Keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports user-defined shortcut keys.
-[Configure Proxy Server](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports proxy server configuration.
-Modular Design | Supports custom high-order function plugins, and plugins support [hot updates](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-[Self-Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] [One-click read](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) the source-code analysis of this project.
-[Program Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] One-click analysis of the project tree of other Python/C/C++/Java/Lua/... projects.
-Paper Reading | [Function Plugin] One-click interpretation of a full LaTeX paper and abstract generation.
-LaTeX Full-Text Translation / Proofreading | [Function Plugin] One-click translation or proofreading of LaTeX papers.
-Batch Comment Generation | [Function Plugin] One-click batch generation of function comments.
-Chat Analysis Report Generation | [Function Plugin] Automatically generates a summary report after running.
-[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function Plugin] Enter an arxiv article URL to translate the abstract and download the PDF with one click.
-[PDF Paper Full-Text Translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function Plugin] Extracts the title & abstract of a PDF paper and translates the full text (multithreaded).
-[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function Plugin] Given any Google Scholar search page URL, let GPT help you pick interesting articles.
-Formula / Image / Table Display | Shows both the TeX form and the rendered form of formulas, with formula and code highlighting.
-Multithreaded Function Plugin Support | Supports multithreaded ChatGPT calls for one-click processing of large volumes of text or code.
-Dark Gradio [Theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Append ```/?__dark-theme=true``` to the browser URL to switch to the dark theme.
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) and [API2D](https://api2d.com/) interface support | Being served by GPT-3.5, GPT-4, and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time must feel great!
-Hugging Face [Online Experience](https://huggingface.co/spaces/qingxu98/gpt-academic) without a proxy | After logging in to Hugging Face, copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic).
-... | ...
-
    - - -- New interface (switch between "left-right layout" and "up-down layout" by modifying the LAYOUT option in config.py) -
    - -
-
-
-- All buttons are dynamically generated by reading functional.py, so custom functions can be added at will, freeing up the clipboard
-
    - -
    - -- Proofreading / correcting -
    - -
    - -- If the output contains formulas, it will be displayed in both the tex form and the rendering form at the same time, which is convenient for copying and reading -
    - -
    - -- Don't want to read the project code? Just take the whole project to chatgpt -
    - -
    - -- Multiple major language model mixing calls (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
    - -
    - -Multiple major language model mixing call [huggingface beta version](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm) - - ---- - -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. Download project -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configure API_KEY and proxy settings - - -In `config.py`, configure the overseas Proxy and OpenAI API KEY as follows: -``` -1. If you are in China, you need to set up an overseas proxy to use the OpenAI API smoothly. Please read config.py carefully for setup details (1. Modify USE_PROXY to True; 2. Modify proxies according to the instructions). -2. Configure the OpenAI API KEY. You need to register and obtain an API KEY on the OpenAI website. Once you get the API KEY, you can configure it in the config.py file. -3. Issues related to proxy networks (network timeouts, proxy failures) are summarized at https://github.com/binary-husky/chatgpt_academic/issues/1 -``` -(P.S. When the program runs, it will first check whether there is a private configuration file named `config_private.py` and use the same-name configuration in `config.py` to overwrite it. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configuration in `config.py` to` config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure.)) - - -3. Install dependencies -```sh -# (Option One) Recommended -python -m pip install -r requirements.txt - -# (Option Two) If you use anaconda, the steps are similar: -# (Option Two.1) conda create -n gptac_venv python=3.11 -# (Option Two.2) conda activate gptac_venv -# (Option Two.3) python -m pip install -r requirements.txt - -# Note: Use official pip source or Ali pip source. Other pip sources (such as some university pips) may have problems, and temporary replacement methods are as follows: -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -If you need to support Tsinghua ChatGLM, you need to install more dependencies (if you are not familiar with python or your computer configuration is not good, we recommend not to try): -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. Run -```sh -python main.py -``` - -5. Test function plugins -``` -- Test Python project analysis - In the input area, enter `./crazy_functions/test_project/python/dqn`, and then click "Analyze the entire Python project" -- Test self-code interpretation - Click "[Multithreading Demo] Interpretation of This Project Itself (Source Code Interpretation)" -- Test experimental function template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions. - Click "[Function Plugin Template Demo] Today in History" -- There are more functions to choose from in the function plugin area drop-down menu. -``` - -## Installation-Method 2: Use Docker (Linux) - -1. ChatGPT only (recommended for most people) -``` sh -# download project -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# configure overseas Proxy and OpenAI API KEY -Edit config.py with any text editor -# Install -docker build -t gpt-academic . 
-# Run
-docker run --rm -it --net=host gpt-academic
-
-# Test function plugins
-## Test the function plugin template function (requires GPT to answer what happened today in history). You can use this function as a template to implement more complex functions.
-Click "[Function Plugin Template Demo] Today in History"
-## Test abstract writing for LaTeX projects
-Enter ./crazy_functions/test_project/latex/attention in the input area, and then click "Read Tex Paper and Write Abstract"
-## Test Python project analysis
-Enter ./crazy_functions/test_project/python/dqn in the input area and click "Analyze the entire Python project."
-
-More functions are available in the function plugin area drop-down menu.
-```
-
-2. ChatGPT+ChatGLM (requires strong familiarity with Docker and a powerful machine)
-
-``` sh
-# Modify the dockerfile
-cd docs && nano Dockerfile+ChatGLM
-# How to build (Dockerfile+ChatGLM is under the docs path, so run cd docs first)
-docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
-# How to run (1) Run directly:
-docker run --rm -it --net=host --gpus=all gpt-academic
-# How to run (2) Enter the container and make adjustments before running:
-docker run --rm -it --net=host --gpus=all gpt-academic bash
-```
-
-
-## Installation-Method 3: Other Deployment Methods
-
-1. Remote Cloud Server Deployment
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-2. Use WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-
-## Installation-Proxy Configuration
-### Method 1: Conventional method
-[Configure Proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)
-
-### Method 2: Step-by-step tutorial for newcomers
-[Step-by-step tutorial for newcomers](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
-
----
-
-## Customizing Convenient Buttons (Customizing Academic Shortcuts)
-Open `core_functional.py` with any text editor, add an entry as follows, then restart the program. (If the button has already been added successfully and is visible, both the prefix and suffix can be hot-modified without restarting the program.) For example:
-```
-"Super English to Chinese translation": {
-    # Prefix, which will be added before your input. For example, used to describe your request, such as translation, code explanation, polishing, etc.
-    "Prefix": "Please translate the following content into Chinese and use a markdown table to interpret the proprietary terms in the text one by one:\n\n",
-
-    # Suffix, which will be added after your input. For example, combined with the prefix, you can wrap your input in quotes.
-    "Suffix": "",
-},
-```
-
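To make the `Prefix`/`Suffix` semantics above concrete, here is a minimal sketch of how such an entry wraps the user's input before it is sent to the model. This is an illustration only: the helper name `build_prompt` and the surrounding code are assumptions, not the project's actual API.

```python
# Minimal sketch (illustrative only): how a core_functional.py-style entry
# combines with the user's input. `build_prompt` is a hypothetical helper.
entry = {
    "Prefix": "Please translate the following content into Chinese:\n\n",
    "Suffix": "",
}

def build_prompt(entry: dict, user_input: str) -> str:
    # The shortcut button simply concatenates Prefix + input + Suffix.
    return entry["Prefix"] + user_input + entry["Suffix"]

print(build_prompt(entry, "The attention mechanism is a sequence-to-sequence operation."))
```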
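Similarly, the `config_private.py` override behaviour described in the installation section (a private, untracked config taking precedence over `config.py`) can be sketched as follows. This is a minimal illustration under stated assumptions: `get_conf` is a hypothetical name, and the project's real loading logic lives in its own codebase.

```python
# Sketch of the "config_private.py overrides config.py" idea. Assumes a
# config.py exists on the import path; `get_conf` is an illustrative name.
import importlib

def get_conf(name: str, default=None):
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)  # private config wins
    except ImportError:
        pass  # no config_private.py present: fall back to config.py
    public = importlib.import_module("config")
    return getattr(public, name, default)

API_KEY = get_conf("API_KEY")
```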
    - -
-
----
-
-
-## Some Function Displays
-
-### Image Display:
-
-
    - -
    - -### If a program can understand and analyze itself: - -
    - -
    - -
    - -
    - -### Analysis of any Python/Cpp project: -
    - -
    - -
    - -
    - -### One-click reading comprehension and summary generation of Latex papers -
    - -
    - -### Automatic report generation -
    - - - -
    - -### Modular functional design -
    - - -
    - -### Source code translation to English - -
    - -
    - -## Todo and version planning: -- version 3.2+ (todo): Function plugin supports more parameter interfaces -- version 3.1: Support for inquiring multiple GPT models at the same time! Support for api2d, support for multiple apikeys load balancing -- version 3.0: Support for chatglm and other small llms -- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins -- version 2.5: Self-updating, solves the problem of text being too long and token overflowing when summarizing large project source code -- version 2.4: (1) Added PDF full text translation function; (2) Added function to switch input area position; (3) Added vertical layout option; (4) Multi-threaded function plugin optimization. -- version 2.3: Enhanced multi-threaded interactivity -- version 2.2: Function plugin supports hot reloading -- version 2.1: Foldable layout -- version 2.0: Introduction of modular function plugins -- version 1.0: Basic functions - -## Reference and learning - -``` -The code design of this project has referenced many other excellent projects, including: - -# Reference project 1: Borrowed many tips from ChuanhuChatGPT -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Reference project 2: Tsinghua ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/Cpp4App/Cpp4App/CDM/run_testing(Used for Adjusting).py b/spaces/Cpp4App/Cpp4App/CDM/run_testing(Used for Adjusting).py deleted file mode 100644 index f97d51edcb95826c6e6aa3e67d987feb4fdd7e4f..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/CDM/run_testing(Used for Adjusting).py +++ /dev/null @@ -1,89 +0,0 @@ -from os.path import join as pjoin -import cv2 -import os - - -def resize_height_by_longest_edge(img_path, resize_length=800): - org = cv2.imread(img_path) - height, width = org.shape[:2] - if height > width: - return resize_length - else: - return int(resize_length * (height / width)) - - -def nothing(x): - pass - - -if __name__ == '__main__': - - ''' - ele:min-grad: gradient threshold to produce binary map - ele:ffl-block: fill-flood threshold - ele:min-ele-area: minimum area for selected elements - ele:merge-contained-ele: if True, merge elements contained in others - text:max-word-inline-gap: words with smaller distance than the gap are counted as a line - text:max-line-gap: lines with smaller distance than the gap are counted as a paragraph - - Tips: - 1. Larger *min-grad* produces fine-grained binary-map while prone to over-segment element to small pieces - 2. Smaller *min-ele-area* leaves tiny elements while prone to produce noises - 3. If not *merge-contained-ele*, the elements inside others will be recognized, while prone to produce noises - 4. 
The *max-word-inline-gap* and *max-line-gap* should be dependent on the input image size and resolution - - mobile: {'min-grad':4, 'ffl-block':5, 'min-ele-area':50, 'max-word-inline-gap':6, 'max-line-gap':1} - web : {'min-grad':3, 'ffl-block':5, 'min-ele-area':25, 'max-word-inline-gap':4, 'max-line-gap':4} - ''' - key_params = {'min-grad':10, 'ffl-block':5, 'min-ele-area':50, 'merge-contained-ele':False, - 'max-word-inline-gap':10, 'max-line-gap':4, 'remove-top-bar':True} - - # set input image path - input_path_img = 'data/input/4.jpg' - output_root = 'data/output' - - resized_height = resize_height_by_longest_edge(input_path_img) - is_clf = False - is_ocr = False - if is_ocr: - import detect_text.text_detection as text - os.makedirs(pjoin(output_root, 'ocr'), exist_ok=True) - text.text_detection(input_path_img, output_root, show=False) - - ''' - ******** Testing with adjustable parameters ******** - ''' - testing_ip = True - testing_merge = False - - cv2.namedWindow('parameters') - if testing_ip: - cv2.createTrackbar('min-grad', 'parameters', 4, 20, nothing) - cv2.createTrackbar('min-ele-area', 'parameters', 20, 200, nothing) - while(1): - key_params['min-grad'] = cv2.getTrackbarPos('min-grad', 'parameters') - key_params['min-ele-area'] = cv2.getTrackbarPos('min-ele-area', 'parameters') - import detect_compo.ip_region_proposal as ip - os.makedirs(pjoin(output_root, 'ip'), exist_ok=True) - # switch of the classification func - classifier = None - if is_clf: - classifier = {} - from cnn.CNN import CNN - # classifier['Image'] = CNN('Image') - classifier['Elements'] = CNN('Elements') - # classifier['Noise'] = CNN('Noise') - ip.compo_detection(input_path_img, output_root, key_params, - classifier=classifier, resize_by_height=resized_height, show=True, wai_key=10) - - if testing_merge: - cv2.createTrackbar('max-word-inline-gap', 'parameters', 4, 20, nothing) - cv2.createTrackbar('max-line-gap', 'parameters', 20, 200, nothing) - while(1): - key_params['max-word-inline-gap'] = cv2.getTrackbarPos('max-word-inline-gap', 'parameters') - key_params['max-line-gap'] = cv2.getTrackbarPos('max-line-gap', 'parameters') - import detect_merge.merge as merge - name = input_path_img.split('/')[-1][:-4] - compo_path = pjoin(output_root, 'ip', str(name) + '.json') - ocr_path = pjoin(output_root, 'ocr', str(name) + '.json') - merge.merge(input_path_img, compo_path, ocr_path, output_root=None, is_remove_top=key_params['remove-top-bar'], show=True, wait_key=10) diff --git a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/midas_net_custom.py b/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/midas_net_custom.py deleted file mode 100644 index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000 --- a/spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/midas_net_custom.py +++ /dev/null @@ -1,128 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. -This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder - - -class MidasNet_small(BaseModel): - """Network for monocular depth estimation. 
- """ - - def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, - blocks={'expand': True}): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet_small, self).__init__() - - use_pretrained = False if path else True - - self.channels_last = channels_last - self.blocks = blocks - self.backbone = backbone - - self.groups = 1 - - features1=features - features2=features - features3=features - features4=features - self.expand = False - if "expand" in self.blocks and self.blocks['expand'] == True: - self.expand = True - features1=features - features2=features*2 - features3=features*4 - features4=features*8 - - self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) - - self.scratch.activation = nn.ReLU(False) - - self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) - - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), - self.scratch.activation, - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - nn.Identity(), - ) - - if path: - self.load(path) - - - def forward(self, x): - """Forward pass. 
-
-        Args:
-            x (tensor): input data (image)
-
-        Returns:
-            tensor: depth
-        """
-        if self.channels_last==True:
-            print("self.channels_last = ", self.channels_last)
-            x.contiguous(memory_format=torch.channels_last)
-
-
-        layer_1 = self.pretrained.layer1(x)
-        layer_2 = self.pretrained.layer2(layer_1)
-        layer_3 = self.pretrained.layer3(layer_2)
-        layer_4 = self.pretrained.layer4(layer_3)
-
-        layer_1_rn = self.scratch.layer1_rn(layer_1)
-        layer_2_rn = self.scratch.layer2_rn(layer_2)
-        layer_3_rn = self.scratch.layer3_rn(layer_3)
-        layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-
-        path_4 = self.scratch.refinenet4(layer_4_rn)
-        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-        out = self.scratch.output_conv(path_1)
-
-        return torch.squeeze(out, dim=1)
-
-
-
-def fuse_model(m):
-    prev_previous_type = nn.Identity()
-    prev_previous_name = ''
-    previous_type = nn.Identity()
-    previous_name = ''
-    for name, module in m.named_modules():
-        if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
-            # print("FUSED ", prev_previous_name, previous_name, name)
-            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
-        elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
-            # print("FUSED ", prev_previous_name, previous_name)
-            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
-        # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
-        #     print("FUSED ", previous_name, name)
-        #     torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
-
-        prev_previous_type = previous_type
-        prev_previous_name = previous_name
-        previous_type = type(module)
-        previous_name = name
\ No newline at end of file
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/video_instruct_dataset.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/video_instruct_dataset.py
deleted file mode 100644
index 7de6e20d30d9b0d7280d706636e9849b7f02618c..0000000000000000000000000000000000000000
--- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/video_instruct_dataset.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import os
-from video_llama.datasets.datasets.base_dataset import BaseDataset
-from video_llama.datasets.datasets.caption_datasets import CaptionDataset
-import pandas as pd
-import decord
-from decord import VideoReader
-import random
-import torch
-from torch.utils.data.dataloader import default_collate
-from PIL import Image
-from typing import Dict, Optional, Sequence
-import transformers
-import pathlib
-import json
-from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
-import copy
-from video_llama.processors import transforms_video, AlproVideoTrainProcessor
-from torchvision import transforms
-from video_llama.processors.video_processor import ToTHWC, ToUint8, load_video
-from video_llama.conversation.conversation_video import Conversation, SeparatorStyle
-
-DEFAULT_IMAGE_PATCH_TOKEN = '<ImageHere>'
-video_conversation = Conversation(
-    system="",
-    roles=("Human", "Assistant"),
-    messages=[],
-    offset=0,
-    sep_style=SeparatorStyle.SINGLE,
-    sep="###",
-)
-IGNORE_INDEX = -100
-
-class Video_Instruct_Dataset(BaseDataset):
-    def __init__(self, vis_processor, text_processor, vis_root, ann_root, num_video_query_token=32, tokenizer_name='/mnt/workspace/ckpt/vicuna-13b/', data_type='video'):
-        """
-        vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/)
directory of Llava images (e.g. webvid_eval/video/) - ann_root (string): Root directory of video (e.g. webvid_eval/annotations/) - split (string): val or test - """ - super().__init__(vis_processor=vis_processor, text_processor=text_processor) - - data_path = pathlib.Path(ann_root) - with data_path.open(encoding='utf-8') as f: - self.annotation = json.load(f) - - self.num_video_query_token = num_video_query_token - self.vis_root = vis_root - self.resize_size = 224 - self.num_frm = 8 - self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False) - self.tokenizer.pad_token = self.tokenizer.eos_token - self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) - self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] - - self.transform = AlproVideoTrainProcessor( - image_size=self.resize_size, n_frms = self.num_frm - ).transform - self.data_type = data_type - - def _get_video_path(self, sample): - rel_video_fp = sample['video'] - full_video_fp = os.path.join(self.vis_root, rel_video_fp) - return full_video_fp - - def __getitem__(self, index): - num_retries = 10 # skip error videos - for _ in range(num_retries): - try: - sample = self.annotation[index] - - video_path = self._get_video_path(sample) - conversation_list = sample['QA'] - - video, msg = load_video( - video_path=video_path, - n_frms=self.num_frm, - height=self.resize_size, - width=self.resize_size, - sampling ="uniform", return_msg = True - ) - video = self.transform(video) - if 'cn' in self.data_type: - msg = "" - # 添加视频,以及msg到convsation list 0 - sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=self.num_video_query_token,msg = msg) - new_sources = convert_source_vicuna_format(sources) - - data_dict = preprocess( - new_sources, - self.tokenizer) - data_dict = dict(input_ids=data_dict["input_ids"][0], - labels=data_dict["labels"][0]) - # image exist in the data - data_dict['image'] = video - except: - print(f"Failed to load examples with video: {video_path}. 
" - f"Will randomly sample an example as a replacement.") - index = random.randint(0, len(self) - 1) - continue - break - else: - raise RuntimeError(f"Failed to fetch video after {num_retries} retries.") - # "image_id" is kept to stay compatible with the COCO evaluation format - return { - "image": video, - "text_input": data_dict["input_ids"], - "labels": data_dict["labels"], - "type":'video', - } - - def __len__(self): - return len(self.annotation) - - def collater(self, instances): - input_ids, labels = tuple([instance[key] for instance in instances] - for key in ("text_input", "labels")) - input_ids = torch.nn.utils.rnn.pad_sequence( - input_ids, - batch_first=True, - padding_value=self.tokenizer.pad_token_id) - labels = torch.nn.utils.rnn.pad_sequence(labels, - batch_first=True, - padding_value=IGNORE_INDEX) - batch = dict( - input_ids=input_ids, - labels=labels, - attention_mask=input_ids.ne(self.tokenizer.pad_token_id), - ) - - if 'image' in instances[0]: - images = [instance['image'] for instance in instances] - if all(x is not None and x.shape == images[0].shape for x in images): - batch['images'] = torch.stack(images) - else: - batch['images'] = images - batch['conv_type'] = 'multi' - return batch - -def convert_source_vicuna_format(sources): - new_sources = [] - for source in sources: - new_source = [] - for i, sentence in enumerate(source): - role_0_msg = sentence['q'] - role_1_msg = sentence['a'] - new_source.append({ - 'from':'human', - 'value': role_0_msg, - }) - new_source.append({ - 'from':'gpt', - 'value': role_1_msg, - }) - new_sources.append(new_source) - return new_sources - -def preprocess_multimodal( - conversation_list: Sequence[str], - multimodal_cfg: dict, - cur_token_len: int, - msg='' -) -> Dict: - # 将conversational list中 - is_multimodal = True - # image_token_len = multimodal_cfg['image_token_len'] - image_token_len = cur_token_len - conversation_list[0]["q"] = " " + msg + conversation_list[0]["q"] - return [conversation_list] - -def _add_speaker_and_signal(header, source, get_conversation=True): - """Add speaker and start/end signal on each round.""" - BEGIN_SIGNAL = "###" - END_SIGNAL = "\n" - conversation = header - for sentence in source: - from_str = sentence["from"] - if from_str.lower() == "human": - from_str = video_conversation.roles[0] - elif from_str.lower() == "gpt": - from_str = video_conversation.roles[1] - else: - from_str = 'unknown' - sentence["value"] = (BEGIN_SIGNAL + from_str + ": " + - sentence["value"] + END_SIGNAL) - if get_conversation: - conversation += sentence["value"] - conversation += BEGIN_SIGNAL - return conversation - -def _tokenize_fn(strings: Sequence[str], - tokenizer: transformers.PreTrainedTokenizer) -> Dict: - """Tokenize a list of strings.""" - tokenized_list = [ - tokenizer( - text, - return_tensors="pt", - padding="longest", - max_length=512, - truncation=True, - ) for text in strings - ] - input_ids = labels = [ - tokenized.input_ids[0] for tokenized in tokenized_list - ] - input_ids_lens = labels_lens = [ - tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() - for tokenized in tokenized_list - ] - return dict( - input_ids=input_ids, - labels=labels, - input_ids_lens=input_ids_lens, - labels_lens=labels_lens, - ) - -def preprocess( - sources: Sequence[str], - tokenizer: transformers.PreTrainedTokenizer, -) -> Dict: - """ - Given a list of sources, each is a conversation list. This transform: - 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; - 2. Concatenate conversations together; - 3. 
Tokenize the concatenated conversation; - 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. - """ - # add end signal and concatenate together - conversations = [] - for source in sources: - header = f"{video_conversation.system}\n\n" - conversation = _add_speaker_and_signal(header, source) - conversations.append(conversation) - # tokenize conversations - conversations_tokenized = _tokenize_fn(conversations, tokenizer) - input_ids = conversations_tokenized["input_ids"] - targets = copy.deepcopy(input_ids) - for target, source in zip(targets, sources): - tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], - tokenizer)["input_ids_lens"] - speakers = [sentence["from"] for sentence in source] - _mask_targets(target, tokenized_lens, speakers) - - return dict(input_ids=input_ids, labels=targets) - -def _mask_targets(target, tokenized_lens, speakers): - # cur_idx = 0 - cur_idx = tokenized_lens[0] - tokenized_lens = tokenized_lens[1:] - target[:cur_idx] = IGNORE_INDEX - for tokenized_len, speaker in zip(tokenized_lens, speakers): - if speaker == "human": - target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX - cur_idx += tokenized_len diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/staticfiles.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/staticfiles.py deleted file mode 100644 index 299015d4fef268cde91273790251f35192e1c8a6..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/staticfiles.py +++ /dev/null @@ -1 +0,0 @@ -from starlette.staticfiles import StaticFiles as StaticFiles # noqa diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/eval_visible_iou.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/eval_visible_iou.py deleted file mode 100644 index be1883d816d784e68f31420c0f85c64485f653ca..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/eval_visible_iou.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -@Date: 2021/08/02 -@description: -The 2DIoU for calculating the visible and full boundaries, such as the MP3D dataset, -has the following data: {'train': 0.9775843958583535, 'test': 0.9828616219607289, 'val': 0.9883810438132491}, -indicating that our best performance is limited to below 98.29% 2DIoU using our approach. 
-""" -import numpy as np -import matplotlib.pyplot as plt - -from tqdm import tqdm -from evaluation.iou import calc_IoU_2D -from visualization.floorplan import draw_iou_floorplan -from utils.conversion import depth2xyz, uv2xyz - - -def eval_dataset_visible_IoU(dataset, show=False): - bar = tqdm(dataset, total=len(dataset), ncols=100) - iou2ds = [] - for data in bar: - bar.set_description(f"Processing {data['id']}") - corners = data['corners'] - corners = corners[corners[..., 0] + corners[..., 1] != 0] # Take effective corners - all_xz = uv2xyz(corners)[..., ::2] - visible_xz = depth2xyz(data['depth'])[..., ::2] - iou2d = calc_IoU_2D(all_xz, visible_xz) - iou2ds.append(iou2d) - if show: - layout_floorplan = draw_iou_floorplan(all_xz, visible_xz, iou2d=iou2d) - plt.imshow(layout_floorplan) - plt.show() - - mean_iou2d = np.array(iou2ds).mean() - return mean_iou2d - - -def execute_eval_dataset_visible_IoU(root_dir, dataset, modes=None): - if modes is None: - modes = ["train", "test", "valid"] - - iou2d_d = {} - for mode in modes: - print("mode: {}".format(mode)) - iou2d = eval_dataset_visible_IoU(dataset(root_dir, mode, patch_num=1024, - keys=['depth', 'visible_corners', 'corners', 'id']), show=False) - iou2d_d[mode] = iou2d - return iou2d_d - - -if __name__ == '__main__': - from dataset.mp3d_dataset import MP3DDataset - - iou2d_d = execute_eval_dataset_visible_IoU(root_dir='../src/dataset/mp3d', - dataset=MP3DDataset, - modes=['train', 'test', 'val']) - print(iou2d_d) diff --git a/spaces/DeeKayG/COCO-Google/Dataset/train2017_5/split_folders.sh b/spaces/DeeKayG/COCO-Google/Dataset/train2017_5/split_folders.sh deleted file mode 100644 index 61fdf4128de06493e2d33c83173411f66617fda1..0000000000000000000000000000000000000000 --- a/spaces/DeeKayG/COCO-Google/Dataset/train2017_5/split_folders.sh +++ /dev/null @@ -1,20 +0,0 @@ -# Set the source directory -SOURCE_DIR="/Volumes/NVMe 4/Proj_YOLO/Dataset/Hugging_Face_Remote/COCO/train2017" - -# Count the number of files -NUM_FILES=$(ls -1q "$SOURCE_DIR" | wc -l) - -# Calculate how many files per directory -FILES_PER_DIR=$(( NUM_FILES / 12 )) - -# Split files across the 12 directories -for i in {1..12}; do - # Create the directory - mkdir -p "${SOURCE_DIR}_$i" - - # Move a subset of files to the new directory - find "$SOURCE_DIR" -maxdepth 1 -type f | head -n $FILES_PER_DIR | xargs -I {} mv {} "${SOURCE_DIR}_$i" - - echo "Moved files to ${SOURCE_DIR}_$i" -done - diff --git a/spaces/Dileepgorantala/dileepVoiceAI/README.md b/spaces/Dileepgorantala/dileepVoiceAI/README.md deleted file mode 100644 index 8e6b29d0b16c14ceb3d14393a125a45e731a8907..0000000000000000000000000000000000000000 --- a/spaces/Dileepgorantala/dileepVoiceAI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: DileepVoiceAI -emoji: 🌍 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/train_utils.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/train_utils.py deleted file mode 100644 index 0c55177f7442010bc1fcc64de3d142585c22adc0..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/train_utils.py +++ /dev/null @@ -1,13 +0,0 @@ - -def aggregate_loss_dict(agg_loss_dict): - mean_vals = {} - for output in agg_loss_dict: - for key in output: - mean_vals[key] = 
mean_vals.setdefault(key, []) + [output[key]] - for key in mean_vals: - if len(mean_vals[key]) > 0: - mean_vals[key] = sum(mean_vals[key]) / len(mean_vals[key]) - else: - print('{} has no value'.format(key)) - mean_vals[key] = 0 - return mean_vals diff --git a/spaces/DragGan/DragGan-Inversion/viz/pickle_widget.py b/spaces/DragGan/DragGan-Inversion/viz/pickle_widget.py deleted file mode 100644 index 6d92f291032749e53758b61fb62753474a9f1ad4..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/viz/pickle_widget.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import glob -import os -import re - -import dnnlib -import imgui -import numpy as np -from gui_utils import imgui_utils - -from . import renderer - -# ---------------------------------------------------------------------------- - - -def _locate_results(pattern): - return pattern - -# ---------------------------------------------------------------------------- - - -class PickleWidget: - def __init__(self, viz): - self.viz = viz - self.search_dirs = [] - self.cur_pkl = None - self.user_pkl = '' - self.recent_pkls = [] - # {tuple(path, ...): [dnnlib.EasyDict(), ...], ...} - self.browse_cache = dict() - self.browse_refocus = False - self.load('', ignore_errors=True) - - def add_recent(self, pkl, ignore_errors=False): - try: - resolved = self.resolve_pkl(pkl) - if resolved not in self.recent_pkls: - self.recent_pkls.append(resolved) - except: - if not ignore_errors: - raise - - def load(self, pkl, ignore_errors=False): - viz = self.viz - viz.clear_result() - viz.skip_frame() # The input field will change on next frame. - try: - resolved = self.resolve_pkl(pkl) - name = resolved.replace('\\', '/').split('/')[-1] - self.cur_pkl = resolved - self.user_pkl = resolved - viz.result.message = f'Loading {name}...' 
- viz.defer_rendering() - if resolved in self.recent_pkls: - self.recent_pkls.remove(resolved) - self.recent_pkls.insert(0, resolved) - except: - self.cur_pkl = None - self.user_pkl = pkl - if pkl == '': - viz.result = dnnlib.EasyDict( - message='No network pickle loaded') - else: - viz.result = dnnlib.EasyDict( - error=renderer.CapturedException()) - if not ignore_errors: - raise - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - recent_pkls = [pkl for pkl in self.recent_pkls if pkl != self.user_pkl] - if show: - imgui.text('Pickle') - imgui.same_line(viz.label_w) - idx = self.user_pkl.rfind('/') - changed, self.user_pkl = imgui_utils.input_text('##pkl', self.user_pkl[idx+1:], 1024, - flags=( - imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), - width=(-1), - help_text=' | | | | /.pkl') - if changed: - self.load(self.user_pkl, ignore_errors=True) - if imgui.is_item_hovered() and not imgui.is_item_active() and self.user_pkl != '': - imgui.set_tooltip(self.user_pkl) - # imgui.same_line() - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Recent...', width=viz.button_w, enabled=(len(recent_pkls) != 0)): - imgui.open_popup('recent_pkls_popup') - imgui.same_line() - if imgui_utils.button('Browse...', enabled=len(self.search_dirs) > 0, width=viz.button_w): - imgui.open_popup('browse_pkls_popup') - self.browse_cache.clear() - self.browse_refocus = True - - if imgui.begin_popup('recent_pkls_popup'): - for pkl in recent_pkls: - clicked, _state = imgui.menu_item(pkl) - if clicked: - self.load(pkl, ignore_errors=True) - imgui.end_popup() - - if imgui.begin_popup('browse_pkls_popup'): - def recurse(parents): - key = tuple(parents) - items = self.browse_cache.get(key, None) - if items is None: - items = self.list_runs_and_pkls(parents) - self.browse_cache[key] = items - for item in items: - if item.type == 'run' and imgui.begin_menu(item.name): - recurse([item.path]) - imgui.end_menu() - if item.type == 'pkl': - clicked, _state = imgui.menu_item(item.name) - if clicked: - self.load(item.path, ignore_errors=True) - if len(items) == 0: - with imgui_utils.grayed_out(): - imgui.menu_item('No results found') - recurse(self.search_dirs) - if self.browse_refocus: - imgui.set_scroll_here() - viz.skip_frame() # Focus will change on next frame. - self.browse_refocus = False - imgui.end_popup() - - paths = viz.pop_drag_and_drop_paths() - if paths is not None and len(paths) >= 1: - self.load(paths[0], ignore_errors=True) - - viz.args.pkl = self.cur_pkl - - def list_runs_and_pkls(self, parents): - items = [] - run_regex = re.compile(r'\d+-.*') - pkl_regex = re.compile(r'network-snapshot-\d+\.pkl') - for parent in set(parents): - if os.path.isdir(parent): - for entry in os.scandir(parent): - if entry.is_dir() and run_regex.fullmatch(entry.name): - items.append(dnnlib.EasyDict( - type='run', name=entry.name, path=os.path.join(parent, entry.name))) - if entry.is_file() and pkl_regex.fullmatch(entry.name): - items.append(dnnlib.EasyDict( - type='pkl', name=entry.name, path=os.path.join(parent, entry.name))) - - items = sorted(items, key=lambda item: ( - item.name.replace('_', ' '), item.path)) - return items - - def resolve_pkl(self, pattern): - assert isinstance(pattern, str) - assert pattern != '' - - # URL => return as is. - if dnnlib.util.is_url(pattern): - return pattern - - # Short-hand pattern => locate. - path = _locate_results(pattern) - - # Run dir => pick the last saved snapshot. 
- if os.path.isdir(path): - pkl_files = sorted( - glob.glob(os.path.join(path, 'network-snapshot-*.pkl'))) - if len(pkl_files) == 0: - raise IOError(f'No network pickle found in "{path}"') - path = pkl_files[-1] - - # Normalize. - path = os.path.abspath(path) - return path - -# ---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/PARSeq-OCR/app.py b/spaces/ECCV2022/PARSeq-OCR/app.py deleted file mode 100644 index 9af229fa6e1e8ce5230e304f6e7e30b0ddc4bd2c..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/PARSeq-OCR/app.py +++ /dev/null @@ -1,101 +0,0 @@ -# Scene Text Recognition Model Hub -# Copyright 2022 Darwin Bautista -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob - -import torch -from torchvision import transforms as T - -import gradio as gr - - -class App: - - title = 'Scene Text Recognition with
Permuted Autoregressive Sequence Models'
-    models = ['parseq', 'parseq_tiny', 'abinet', 'crnn', 'trba', 'vitstr']
-
-    def __init__(self):
-        self._model_cache = {}
-        self._preprocess = T.Compose([
-            T.Resize((32, 128), T.InterpolationMode.BICUBIC),
-            T.ToTensor(),
-            T.Normalize(0.5, 0.5)
-        ])
-
-    def _get_model(self, name):
-        if name in self._model_cache:
-            return self._model_cache[name]
-        model = torch.hub.load('baudm/parseq', name, pretrained=True).eval()
-        self._model_cache[name] = model
-        return model
-
-    @torch.inference_mode()
-    def __call__(self, model_name, image):
-        if image is None:
-            return '', []
-        model = self._get_model(model_name)
-        image = self._preprocess(image.convert('RGB')).unsqueeze(0)
-        # Greedy decoding
-        pred = model(image).softmax(-1)
-        label, _ = model.tokenizer.decode(pred)
-        raw_label, raw_confidence = model.tokenizer.decode(pred, raw=True)
-        # Format confidence values
-        max_len = 25 if model_name == 'crnn' else len(label[0]) + 1
-        conf = list(map('{:0.1f}'.format, raw_confidence[0][:max_len].tolist()))
-        return label[0], [raw_label[0][:max_len], conf]
-
-
-def main():
-    app = App()
-
-    with gr.Blocks(analytics_enabled=False, title=app.title.replace('<br/>', ' ')) as demo:
-        gr.Markdown(f"""
-
-        # {app.title}
-        [![GitHub](https://img.shields.io/badge/baudm-parseq-blue?logo=github)](https://github.com/baudm/parseq)
-
    - - To use this interactive demo for PARSeq and reproduced models: - 1. Select which model you want to use. - 2. Upload your own cropped image (or select from the given examples), or sketch on the canvas. - 3. Click **Read Text**. - - *NOTE*: None of these models were trained on handwritten text datasets. - """) - model_name = gr.Radio(app.models, value=app.models[0], label='The STR model to use') - with gr.Tabs(): - with gr.TabItem('Image Upload'): - image_upload = gr.Image(type='pil', source='upload', label='Image') - gr.Examples(glob.glob('demo_images/*.*'), inputs=image_upload) - read_upload = gr.Button('Read Text') - with gr.TabItem('Canvas Sketch'): - image_canvas = gr.Image(type='pil', source='canvas', label='Sketch') - read_canvas = gr.Button('Read Text') - - output = gr.Textbox(max_lines=1, label='Model output') - #adv_output = gr.Checkbox(label='Show detailed output') - raw_output = gr.Dataframe(row_count=2, col_count=0, label='Raw output with confidence values ([0, 1] interval; [B] - BLANK token; [E] - EOS token)') - - read_upload.click(app, inputs=[model_name, image_upload], outputs=[output, raw_output]) - read_canvas.click(app, inputs=[model_name, image_canvas], outputs=[output, raw_output]) - #adv_output.change(lambda x: gr.update(visible=x), inputs=adv_output, outputs=raw_output) - - demo.launch() - - -if __name__ == '__main__': - main() diff --git a/spaces/Eddycrack864/Applio-Inference/demucs/model.py b/spaces/Eddycrack864/Applio-Inference/demucs/model.py deleted file mode 100644 index e9d932f4d014f7b95b394d2e24ed5edc379ded8d..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/demucs/model.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import julius -from torch import nn - -from .utils import capture_init, center_trim - - -class BLSTM(nn.Module): - def __init__(self, dim, layers=1): - super().__init__() - self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) - self.linear = nn.Linear(2 * dim, dim) - - def forward(self, x): - x = x.permute(2, 0, 1) - x = self.lstm(x)[0] - x = self.linear(x) - x = x.permute(1, 2, 0) - return x - - -def rescale_conv(conv, reference): - std = conv.weight.std().detach() - scale = (std / reference)**0.5 - conv.weight.data /= scale - if conv.bias is not None: - conv.bias.data /= scale - - -def rescale_module(module, reference): - for sub in module.modules(): - if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): - rescale_conv(sub, reference) - - -class Demucs(nn.Module): - @capture_init - def __init__(self, - sources, - audio_channels=2, - channels=64, - depth=6, - rewrite=True, - glu=True, - rescale=0.1, - resample=True, - kernel_size=8, - stride=4, - growth=2., - lstm_layers=2, - context=3, - normalize=False, - samplerate=44100, - segment_length=4 * 10 * 44100): - """ - Args: - sources (list[str]): list of source names - audio_channels (int): stereo or mono - channels (int): first convolution channels - depth (int): number of encoder/decoder layers - rewrite (bool): add 1x1 convolution to each encoder layer - and a convolution to each decoder layer. - For the decoder layer, `context` gives the kernel size. - glu (bool): use glu instead of ReLU - resample_input (bool): upsample x2 the input and downsample /2 the output. 
- rescale (int): rescale initial weights of convolutions - to get their standard deviation closer to `rescale` - kernel_size (int): kernel size for convolutions - stride (int): stride for convolutions - growth (float): multiply (resp divide) number of channels by that - for each layer of the encoder (resp decoder) - lstm_layers (int): number of lstm layers, 0 = no lstm - context (int): kernel size of the convolution in the - decoder before the transposed convolution. If > 1, - will provide some context from neighboring time - steps. - samplerate (int): stored as meta information for easing - future evaluations of the model. - segment_length (int): stored as meta information for easing - future evaluations of the model. Length of the segments on which - the model was trained. - """ - - super().__init__() - self.audio_channels = audio_channels - self.sources = sources - self.kernel_size = kernel_size - self.context = context - self.stride = stride - self.depth = depth - self.resample = resample - self.channels = channels - self.normalize = normalize - self.samplerate = samplerate - self.segment_length = segment_length - - self.encoder = nn.ModuleList() - self.decoder = nn.ModuleList() - - if glu: - activation = nn.GLU(dim=1) - ch_scale = 2 - else: - activation = nn.ReLU() - ch_scale = 1 - in_channels = audio_channels - for index in range(depth): - encode = [] - encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()] - if rewrite: - encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation] - self.encoder.append(nn.Sequential(*encode)) - - decode = [] - if index > 0: - out_channels = in_channels - else: - out_channels = len(self.sources) * audio_channels - if rewrite: - decode += [nn.Conv1d(channels, ch_scale * channels, context), activation] - decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)] - if index > 0: - decode.append(nn.ReLU()) - self.decoder.insert(0, nn.Sequential(*decode)) - in_channels = channels - channels = int(growth * channels) - - channels = in_channels - - if lstm_layers: - self.lstm = BLSTM(channels, lstm_layers) - else: - self.lstm = None - - if rescale: - rescale_module(self, reference=rescale) - - def valid_length(self, length): - """ - Return the nearest valid length to use with the model so that - there is no time steps left over in a convolutions, e.g. for all - layers, size of the input - kernel_size % stride = 0. - - If the mixture has a valid length, the estimated sources - will have exactly the same length when context = 1. If context > 1, - the two signals can be center trimmed to match. - - For training, extracts should have a valid length.For evaluation - on full tracks we recommend passing `pad = True` to :method:`forward`. 
- """ - if self.resample: - length *= 2 - for _ in range(self.depth): - length = math.ceil((length - self.kernel_size) / self.stride) + 1 - length = max(1, length) - length += self.context - 1 - for _ in range(self.depth): - length = (length - 1) * self.stride + self.kernel_size - - if self.resample: - length = math.ceil(length / 2) - return int(length) - - def forward(self, mix): - x = mix - - if self.normalize: - mono = mix.mean(dim=1, keepdim=True) - mean = mono.mean(dim=-1, keepdim=True) - std = mono.std(dim=-1, keepdim=True) - else: - mean = 0 - std = 1 - - x = (x - mean) / (1e-5 + std) - - if self.resample: - x = julius.resample_frac(x, 1, 2) - - saved = [] - for encode in self.encoder: - x = encode(x) - saved.append(x) - if self.lstm: - x = self.lstm(x) - for decode in self.decoder: - skip = center_trim(saved.pop(-1), x) - x = x + skip - x = decode(x) - - if self.resample: - x = julius.resample_frac(x, 2, 1) - x = x * std + mean - x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1)) - return x diff --git a/spaces/ElainaFanBoy/MusicGen/tests/data/__init__.py b/spaces/ElainaFanBoy/MusicGen/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/MusicGen/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/FadouaFGM/Stackoverflow_Questions_Categorisation/app.py b/spaces/FadouaFGM/Stackoverflow_Questions_Categorisation/app.py deleted file mode 100644 index bdc1f0c9ae055ecec8907f2ce25e0d3f82a4c13a..0000000000000000000000000000000000000000 --- a/spaces/FadouaFGM/Stackoverflow_Questions_Categorisation/app.py +++ /dev/null @@ -1,242 +0,0 @@ -# #
**Project 5: Automatically Categorize Questions**
-# # 
**Notebook: API**
-#
-# ## Link to the created HuggingFace Space:
-#
-# # https://huggingface.co/spaces/FadouaFGM/Stackoverflow_Questions_Categorisation
-#
-#
-# 
-
-#
-# **We start by defining the global functions used for cleaning the text, then the vectorization and the ML model chosen during the study carried out in the previous notebook.**
-
-
-
-
-from sklearn.pipeline import Pipeline
-from sklearn.feature_extraction.text import CountVectorizer
-from sklearn.multiclass import OneVsRestClassifier
-from sklearn.linear_model import LogisticRegression
-from sklearn.decomposition import LatentDirichletAllocation
-import pandas as pd
-import numpy as np
-import matplotlib.pyplot as plt
-import seaborn as sns
-import time
-import warnings
-import re
-import nltk
-import spacy
-
-from nltk.tokenize import WordPunctTokenizer
-from nltk.corpus import stopwords
-
-nlp = spacy.load("en_core_web_sm")
-nlp.Defaults.stop_words.add("`,")
-nlp.Defaults.stop_words.add("``")
-
-
-# ### **Defining the functions and the model**
-
-
-# Define functions
-
-# lemmatize text without stop or punctuation words
-def lemmatize(text):
-    doc = nlp(text)
-    tokens = [token.lemma_ for token in doc if not (token.is_stop or token.is_digit or token.is_punct)]
-    return ' '.join(tokens)
-
-def tokenization(text):
-    tokens = WordPunctTokenizer().tokenize(text)
-    return tokens
-
-
-# Function to preprocess text
-def clean(text):
-    # Lower case
-    text = text.lower()
-    # Removing paragraph numbers
-    text = re.sub(r'[0-9]+.\t', '', text)
-    # Change the pattern C# to csharp
-    pattern = r'c\#'
-    text = re.sub(pattern, 'csharp', text)
-    # Removing web and HTML links
-    text = re.sub(r'http\S+', '', text)
-    # Removing special characters
-    text = re.sub("<p>", '', str(text))
-    text = re.sub("</p>", '', str(text))
-    text = re.sub("<code>", '', str(text))
-    text = re.sub("</code>", '', str(text))
    -    text = re.sub("&", '', str(text))
    -    text = re.sub(";", '', str(text))
    -    text = re.sub("gt", ' ', str(text))
    -    text = re.sub("pre", '', str(text))
    -    # Removing any reference to outside text
    -    text = re.sub("[\(\[].*?[\)\]]", "", str(text))
    -    # Removing numbers
    -    text = re.sub('[0-9]', '', str(text))
    -    # Removing new line characters
    -    text = re.sub('\n ', '', str(text))
    -    text = re.sub('\n', ' ', str(text))
    -    # Removing apostrophes
    -    text = re.sub("'s", '', str(text))
    -    # Removing hyphens
    -    text = re.sub("-", ' ', str(text))
    -    text = re.sub("—", '', str(text))
    -    # Removing > or < or = signs
    -    text = re.sub("<", ' ', str(text))
    -    text = re.sub(">", '', str(text))
    -    text = re.sub("=", '', str(text))
    -    # Removing quotation marks
    -    text = re.sub('\"', '', str(text))
-    # Removing slashes
    -    text = re.sub('/', '', str(text))
-    # Use a regex to delete everything inside < >
    -    CLEANR = re.compile('<.*?>')
    -    text = re.sub(CLEANR, '', text)
    -    
    -    return text
    -
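For illustration, a rough sketch of what clean() produces on a typical question title (a sketch only; the HTML-tag patterns reconstructed above are assumptions):

sample = "1.\tHow do I parse JSON in C#? See https://example.com (the docs)"
print(clean(sample))
# roughly -> "how do i parse json in csharp? see"
# (lowercased; numbered prefix, URL and parenthesised text removed; 'c#' -> 'csharp')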
    -def remove_code(text):
    -    
-    # first position of the '<code>' tag in the text
-    codepointer=text.find('<code>')
-    result=''
-    
-    while codepointer!=-1:
-        # last position: start of the matching '</code>' tag
-        codeender=text.find(u'</code>',codepointer)
-        # the code between pointer and ender (+7 keeps the 7-char '</code>' tag)
-        result=result+text[codepointer:codeender+7]
-        codepointer=text.find('<code>',codeender)
    -        
    -    listOfWords2remove = ([i for i in result.split()])
    -    
    -    for i in listOfWords2remove:
    -        text = text.replace(i, '')    
    -        
    -    return text
    -
    -def text_processing(dfoftext):
    -    
    -    cleaneddftext = dfoftext.apply(lambda txt : remove_code(txt))
    -    cleaneddftext = cleaneddftext.apply(lambda txt : clean(txt))
    -    cleaneddftext = cleaneddftext.apply(lambda txt : lemmatize(txt))
    -    
    -    return cleaneddftext
    -
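A minimal usage sketch of the whole pipeline on a pandas Series (the sample question is made up):

import pandas as pd

questions = pd.Series(["How to read a csv file with pandas?"])
cleaned = text_processing(questions)   # remove_code -> clean -> lemmatize
print(cleaned[0])                      # e.g. 'read csv file pandas'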
-# Define a function that binarizes predicted probabilities using one decision threshold per label
    -def predict_with_thresholds(y_prob, thresholds):
    -    y_pred = np.zeros_like(y_prob)
    -    for i in range(y_prob.shape[1]):
    -        y_pred[:, i] = (y_prob[:, i] >= thresholds[i]).astype(int)
    -    return y_pred
    -
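For example, each column of probabilities is binarized against its own cut-off:

import numpy as np

y_prob = np.array([[0.62, 0.30],
                   [0.10, 0.55]])
thresholds = [0.5, 0.4]                       # one decision threshold per label
print(predict_with_thresholds(y_prob, thresholds))
# [[1. 0.]    label 0: 0.62 >= 0.5, label 1: 0.30 < 0.4
#  [0. 1.]]   label 0: 0.10 <  0.5, label 1: 0.55 >= 0.4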
    -
    -import joblib
    -
    -def makeprediction(text):
    -    # load the pre-trained TfidfVectorizer from disk
    -    tfidfvectorizer = joblib.load('tfidf_vectorizer_100523.joblib')
    - 
-    # load the pre-trained one-vs-rest logistic regression classifier from disk
    -    ovr = joblib.load('linear_regression_classifier_100523.joblib')
    -    
    -    # Processing the text
    -    cleanedtext = text_processing(text)
    -    #print(cleanedtext)
    -    #print(type(cleanedtext))
    -    
-    # applying the model and reconstructing the predicted targets
    -    texttfidf = tfidfvectorizer.transform(cleanedtext)
    -    
    -    # make prediction with pretrained classifier
    -    ypred = ovr.predict_proba(texttfidf)
    -    #print(ypred)
    -    
-    # reconstructing tags from predicted y
    -    thresholds      = joblib.load('thresholds_100523.joblib')
    -    labels          = joblib.load('labels_100523.joblib')
    -    
    -    y_pred_thr      = predict_with_thresholds(ypred,thresholds)
    -    #print(y_pred_thr)
    -    
    -    tags_pred       = [[labels[i] for i in range(len(yp)) if yp[i] == 1] for yp in y_pred_thr]
    -    #tags_pred       = tags_pred.apply(lambda x: x if x else ['no predicted labels'])
    -    
    -    # Predict with unsupervised model the most important topics and key words related to the document
    -    # Load unsupervised pretrained model and dictionary
    -    lda_model = joblib.load('best_lda_model.joblib')
    -    lda_dictionary = joblib.load('lda_dictionary.joblib')
    -
    -    # process the document to be a corpus as needed for LDA algorithm
    -    cleanedtext = pd.Series(cleanedtext)
    -    text_lda = ' '.join((cleanedtext.tolist())) 
    -    corpus   = lda_dictionary.doc2bow(text_lda.split())
    -
    -    # get the topic distribution for the document
    -    doc_topics = lda_model.get_document_topics(corpus)
    -
    -    # filter topics with probability > 0.2 and sort them by probability
    -    important_topics = sorted([(topic, prob) for topic, prob in doc_topics if prob > 0.2], key=lambda x: x[1], reverse=True)
    -
    -    # print the most important topics related to the document and their probability
-    print('The most important topics related to the document, and their probabilities, are:')
    -    for topic, prob in important_topics:
    -        print(f'Topic {topic}: {prob:.2f}')
    -
    -    # get the main keywords for each important topic
    -    important_topic_ids = [topic for topic, _ in important_topics]
    -    topic_keywords = lda_model.show_topics(num_topics=-1, formatted=False)
-    main_keywords = []
    -    for topic_id, topic_prob in topic_keywords:
    -        if topic_id in important_topic_ids:
    -            keywords = [(word, prob) for word, prob in topic_prob if prob > 0.05]
    -            main_keywords.append(keywords)
    -
    -    return tags_pred, important_topics, main_keywords
    -
-# ### **Implementing the API: Gradio**
    -import gradio as gra
    -
-def predict(text: str):
    -    data = [[text]]
    -    data = pd.DataFrame(data, columns = ['Text'])
    -    tags, topics, keywords = makeprediction(data['Text'])
    -    #return {"tags!😎": tags, "Related Topics": topics, "Most important key words": keywords }    
    -    return tags, topics, keywords
    -
    -inputs = gra.inputs.Textbox(label="Question to predict tags, topics, and keywords", lines=10)
    -outputs = [
    -    gra.outputs.Textbox(label="Tags"),
    -    gra.outputs.Textbox(label="Related Topics"),
    -    gra.outputs.Textbox(label="Related Keywords")
    -]
    -title = "Prediction: Tags, Topics, and Keywords for StackOverflow Questions"
    -
    -app = gra.Interface(fn=predict, inputs=inputs, outputs=outputs, title=title)
    -
    -app.launch(debug=True,enable_queue=True)
    -
    -#link to the created HuggingFace Space
    -# https://huggingface.co/spaces/FadouaFGM/Stackoverflow_Questions_Categorisation
    -# Process to update parameters of the model or any other changes on notebooks
    -# copy all files to the cloned repository
    -# cp ../Formation_ML/P5/*.joblib .
    -# cp ../Formation_ML/P5/app.py .
    -# add all files and commit
    -# git add .
    -# git commit -m "Update model parameters"
    -# git push
    -
    -#pip freeze > requirements.txt
    -
    -
    -
    -
    diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Better.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Better.py
    deleted file mode 100644
    index bee52870eb3300f25c9762ab204968791a2a30a9..0000000000000000000000000000000000000000
    --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Better.py
    +++ /dev/null
    @@ -1,56 +0,0 @@
    -import os
    -import json
    -import requests
    -from typing import Dict, get_type_hints
    -
    -url = 'https://openai-proxy-api.vercel.app/v1/'
    -model = [
    -    'gpt-3.5-turbo',
    -    'gpt-3.5-turbo-0613',
    -    'gpt-3.5-turbo-16k',
    -    'gpt-3.5-turbo-16k-0613',
    -    'gpt-4',
    -]
    -
    -supports_stream = True
    -needs_auth = False
    -
    -
    -def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    -    headers = {
    -        'Content-Type': 'application/json',
    -        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58',
    -        'Referer': 'https://chat.ylokh.xyz/',
    -        'Origin': 'https://chat.ylokh.xyz',
    -        'Connection': 'keep-alive',  
    -    }
    -
    -    json_data = {
    -        'messages': messages,
    -        'temperature': 1.0,
    -        'model': model,
    -        'stream': stream,
    -    }
    -
    -    response = requests.post(
    -        'https://openai-proxy-api.vercel.app/v1/chat/completions', headers=headers, json=json_data, stream=True
    -    )
    -
    -    for token in response.iter_lines():
    -        decoded = token.decode('utf-8')
    -        if decoded.startswith('data: '):
-            data_str = decoded.replace('data: ', '')
-            if data_str.strip() == '[DONE]':  # OpenAI-style end-of-stream sentinel is not JSON
-                break
-            data = json.loads(data_str)
    -            if 'choices' in data and 'delta' in data['choices'][0]:
    -                delta = data['choices'][0]['delta']
    -                content = delta.get('content', '')
    -                finish_reason = delta.get('finish_reason', '')
    -
    -                if finish_reason == 'stop':
    -                    break
    -                if content:
    -                    yield content
    -
    -
    -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + '(%s)' % ', '.join(
    -    [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
    diff --git a/spaces/FoxMeo/fire-detector/models/experimental.py b/spaces/FoxMeo/fire-detector/models/experimental.py
    deleted file mode 100644
    index 735d7aa0ebe7dbf3c4b062ebc3858cb5f9ebab40..0000000000000000000000000000000000000000
    --- a/spaces/FoxMeo/fire-detector/models/experimental.py
    +++ /dev/null
    @@ -1,272 +0,0 @@
    -import numpy as np
    -import random
    -import torch
    -import torch.nn as nn
    -
    -from models.common import Conv, DWConv
    -from utils.google_utils import attempt_download
    -
    -
    -class CrossConv(nn.Module):
    -    # Cross Convolution Downsample
    -    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
    -        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
    -        super(CrossConv, self).__init__()
    -        c_ = int(c2 * e)  # hidden channels
    -        self.cv1 = Conv(c1, c_, (1, k), (1, s))
    -        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
    -        self.add = shortcut and c1 == c2
    -
    -    def forward(self, x):
    -        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
    -
    -
    -class Sum(nn.Module):
    -    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    -    def __init__(self, n, weight=False):  # n: number of inputs
    -        super(Sum, self).__init__()
    -        self.weight = weight  # apply weights boolean
    -        self.iter = range(n - 1)  # iter object
    -        if weight:
    -            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights
    -
    -    def forward(self, x):
    -        y = x[0]  # no weight
    -        if self.weight:
    -            w = torch.sigmoid(self.w) * 2
    -            for i in self.iter:
    -                y = y + x[i + 1] * w[i]
    -        else:
    -            for i in self.iter:
    -                y = y + x[i + 1]
    -        return y
    -
    -
    -class MixConv2d(nn.Module):
    -    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    -    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
    -        super(MixConv2d, self).__init__()
    -        groups = len(k)
    -        if equal_ch:  # equal c_ per group
    -            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
    -            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
    -        else:  # equal weight.numel() per group
    -            b = [c2] + [0] * groups
    -            a = np.eye(groups + 1, groups, k=-1)
    -            a -= np.roll(a, 1, axis=1)
    -            a *= np.array(k) ** 2
    -            a[0] = 1
    -            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
    -
    -        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
    -        self.bn = nn.BatchNorm2d(c2)
    -        self.act = nn.LeakyReLU(0.1, inplace=True)
    -
    -    def forward(self, x):
    -        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
    -
    -
    -class Ensemble(nn.ModuleList):
    -    # Ensemble of models
    -    def __init__(self):
    -        super(Ensemble, self).__init__()
    -
    -    def forward(self, x, augment=False):
    -        y = []
    -        for module in self:
    -            y.append(module(x, augment)[0])
    -        # y = torch.stack(y).max(0)[0]  # max ensemble
    -        # y = torch.stack(y).mean(0)  # mean ensemble
    -        y = torch.cat(y, 1)  # nms ensemble
    -        return y, None  # inference, train output
    -
    -
    -
    -
    -
    -class ORT_NMS(torch.autograd.Function):
    -    '''ONNX-Runtime NMS operation'''
    -    @staticmethod
    -    def forward(ctx,
    -                boxes,
    -                scores,
    -                max_output_boxes_per_class=torch.tensor([100]),
    -                iou_threshold=torch.tensor([0.45]),
    -                score_threshold=torch.tensor([0.25])):
    -        device = boxes.device
    -        batch = scores.shape[0]
    -        num_det = random.randint(0, 100)
    -        batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device)
    -        idxs = torch.arange(100, 100 + num_det).to(device)
    -        zeros = torch.zeros((num_det,), dtype=torch.int64).to(device)
    -        selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous()
    -        selected_indices = selected_indices.to(torch.int64)
    -        return selected_indices
    -
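    # Note: forward() above deliberately returns random but correctly shaped dummy
    # indices; it only runs while torch.onnx.export traces the graph. symbolic()
    # below then maps the node to the real ONNX NonMaxSuppression op, which ONNX
    # Runtime executes at inference time.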
    -    @staticmethod
    -    def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold):
    -        return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
    -
    -
    -class TRT_NMS(torch.autograd.Function):
    -    '''TensorRT NMS operation'''
    -    @staticmethod
    -    def forward(
    -        ctx,
    -        boxes,
    -        scores,
    -        background_class=-1,
    -        box_coding=1,
    -        iou_threshold=0.45,
    -        max_output_boxes=100,
    -        plugin_version="1",
    -        score_activation=0,
    -        score_threshold=0.25,
    -    ):
    -        batch_size, num_boxes, num_classes = scores.shape
    -        num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
    -        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
    -        det_scores = torch.randn(batch_size, max_output_boxes)
    -        det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
    -        return num_det, det_boxes, det_scores, det_classes
    -
    -    @staticmethod
    -    def symbolic(g,
    -                 boxes,
    -                 scores,
    -                 background_class=-1,
    -                 box_coding=1,
    -                 iou_threshold=0.45,
    -                 max_output_boxes=100,
    -                 plugin_version="1",
    -                 score_activation=0,
    -                 score_threshold=0.25):
    -        out = g.op("TRT::EfficientNMS_TRT",
    -                   boxes,
    -                   scores,
    -                   background_class_i=background_class,
    -                   box_coding_i=box_coding,
    -                   iou_threshold_f=iou_threshold,
    -                   max_output_boxes_i=max_output_boxes,
    -                   plugin_version_s=plugin_version,
    -                   score_activation_i=score_activation,
    -                   score_threshold_f=score_threshold,
    -                   outputs=4)
    -        nums, boxes, scores, classes = out
    -        return nums, boxes, scores, classes
    -
    -
    -class ONNX_ORT(nn.Module):
    -    '''onnx module with ONNX-Runtime NMS operation.'''
    -    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80):
    -        super().__init__()
    -        self.device = device if device else torch.device("cpu")
    -        self.max_obj = torch.tensor([max_obj]).to(device)
    -        self.iou_threshold = torch.tensor([iou_thres]).to(device)
    -        self.score_threshold = torch.tensor([score_thres]).to(device)
    -        self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic
    -        self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]],
    -                                           dtype=torch.float32,
    -                                           device=self.device)
    -        self.n_classes=n_classes
    -
    -    def forward(self, x):
    -        boxes = x[:, :, :4]
    -        conf = x[:, :, 4:5]
    -        scores = x[:, :, 5:]
    -        if self.n_classes == 1:
-            scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
-                                 # so there is no need to multiply.
    -        else:
    -            scores *= conf  # conf = obj_conf * cls_conf
    -        boxes @= self.convert_matrix
    -        max_score, category_id = scores.max(2, keepdim=True)
    -        dis = category_id.float() * self.max_wh
    -        nmsbox = boxes + dis
    -        max_score_tp = max_score.transpose(1, 2).contiguous()
    -        selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold)
    -        X, Y = selected_indices[:, 0], selected_indices[:, 2]
    -        selected_boxes = boxes[X, Y, :]
    -        selected_categories = category_id[X, Y, :].float()
    -        selected_scores = max_score[X, Y, :]
    -        X = X.unsqueeze(1).float()
    -        return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1)
    -
    -class ONNX_TRT(nn.Module):
    -    '''onnx module with TensorRT NMS operation.'''
    -    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80):
    -        super().__init__()
    -        assert max_wh is None
    -        self.device = device if device else torch.device('cpu')
    -        self.background_class = -1,
    -        self.box_coding = 1,
    -        self.iou_threshold = iou_thres
    -        self.max_obj = max_obj
    -        self.plugin_version = '1'
    -        self.score_activation = 0
    -        self.score_threshold = score_thres
    -        self.n_classes=n_classes
    -
    -    def forward(self, x):
    -        boxes = x[:, :, :4]
    -        conf = x[:, :, 4:5]
    -        scores = x[:, :, 5:]
    -        if self.n_classes == 1:
-            scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
-                                 # so there is no need to multiply.
    -        else:
    -            scores *= conf  # conf = obj_conf * cls_conf
    -        num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding,
    -                                                                    self.iou_threshold, self.max_obj,
    -                                                                    self.plugin_version, self.score_activation,
    -                                                                    self.score_threshold)
    -        return num_det, det_boxes, det_scores, det_classes
    -
    -
    -class End2End(nn.Module):
    -    '''export onnx or tensorrt model with NMS operation.'''
    -    def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80):
    -        super().__init__()
    -        device = device if device else torch.device('cpu')
    -        assert isinstance(max_wh,(int)) or max_wh is None
    -        self.model = model.to(device)
    -        self.model.model[-1].end2end = True
    -        self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT
    -        self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes)
    -        self.end2end.eval()
    -
    -    def forward(self, x):
    -        x = self.model(x)
    -        x = self.end2end(x)
    -        return x
    -
    -
    -
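A hedged export sketch for End2End (assumes `model` is an already loaded YOLOv7 detector; the file name, input size, and opset are placeholders):

e2e = End2End(model, max_obj=100, iou_thres=0.45, score_thres=0.25,
              max_wh=None, n_classes=80)   # max_wh=None selects the TensorRT NMS path
dummy = torch.zeros(1, 3, 640, 640)
torch.onnx.export(e2e, dummy, 'yolov7-end2end.onnx', opset_version=12)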
    -
    -
    -def attempt_load(weights, map_location=None):
    -    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    -    model = Ensemble()
    -    for w in weights if isinstance(weights, list) else [weights]:
    -        attempt_download(w)
    -        ckpt = torch.load(w, map_location=map_location)  # load
    -        model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model
    -    
    -    # Compatibility updates
    -    for m in model.modules():
    -        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
    -            m.inplace = True  # pytorch 1.7.0 compatibility
    -        elif type(m) is nn.Upsample:
    -            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
    -        elif type(m) is Conv:
    -            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
    -    
    -    if len(model) == 1:
    -        return model[-1]  # return model
    -    else:
    -        print('Ensemble created with %s\n' % weights)
    -        for k in ['names', 'stride']:
    -            setattr(model, k, getattr(model[-1], k))
    -        return model  # return ensemble
    -
    -
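For reference, a short usage sketch (checkpoint paths are placeholders): a single path returns one fused FP32 model, a list returns an Ensemble:

single = attempt_load('best.pt', map_location='cpu')
merged = attempt_load(['a.pt', 'b.pt'], map_location='cpu')  # prints 'Ensemble created ...'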
    diff --git a/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/thai.py b/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/thai.py
    deleted file mode 100644
    index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000
    --- a/spaces/FrankZxShen/vits-fast-fineturning-models-ba/text/thai.py
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -import re
    -from num_thai.thainumbers import NumThai
    -
    -
    -num = NumThai()
    -
    -# List of (Latin alphabet, Thai) pairs:
    -_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    -    ('a', 'เอ'),
    -    ('b','บี'),
    -    ('c','ซี'),
    -    ('d','ดี'),
    -    ('e','อี'),
    -    ('f','เอฟ'),
    -    ('g','จี'),
    -    ('h','เอช'),
    -    ('i','ไอ'),
    -    ('j','เจ'),
    -    ('k','เค'),
    -    ('l','แอล'),
    -    ('m','เอ็ม'),
    -    ('n','เอ็น'),
    -    ('o','โอ'),
    -    ('p','พี'),
    -    ('q','คิว'),
    -    ('r','แอร์'),
    -    ('s','เอส'),
    -    ('t','ที'),
    -    ('u','ยู'),
    -    ('v','วี'),
    -    ('w','ดับเบิลยู'),
    -    ('x','เอ็กซ์'),
    -    ('y','วาย'),
    -    ('z','ซี')
    -]]
    -
    -
    -def num_to_thai(text):
    -    return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
    -
    -def latin_to_thai(text):
    -    for regex, replacement in _latin_to_thai:
    -        text = re.sub(regex, replacement, text)
    -    return text
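A small usage sketch (the letter transliterations follow the table above; the numeric reading comes from num_thai and is shown as an assumption):

print(latin_to_thai('ab'))   # -> 'เอบี' per the mapping table
print(num_to_thai('25'))     # -> Thai words for 25, e.g. 'ยี่สิบห้า'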
    diff --git a/spaces/GIZ/SDSN-demo/utils/ndc_explorer.py b/spaces/GIZ/SDSN-demo/utils/ndc_explorer.py
    deleted file mode 100644
    index 6bdb527b232ac800156b1c5ca50ddd97e0560d0b..0000000000000000000000000000000000000000
    --- a/spaces/GIZ/SDSN-demo/utils/ndc_explorer.py
    +++ /dev/null
    @@ -1,90 +0,0 @@
    -
    -import urllib.request
    -import json
    -
    -link = "https://klimalog.die-gdi.de/ndc/open-data/dataset.json"
    -def get_document(country_code: str):
    -    """
    -    read the country NDC data from 
    -    https://klimalog.die-gdi.de/ndc/open-data/dataset.json 
    -    using the country code.
    -    
    -    Params
    -    -------
-    country_code: standard country code of the country whose NDC data is fetched."""
    -    with urllib.request.urlopen(link) as urlfile:
    -        data =  json.loads(urlfile.read())
    -    categoriesData = {}
    -    categoriesData['categories']= data['categories']
    -    categoriesData['subcategories']= data['subcategories']
    -    keys_sub = categoriesData['subcategories'].keys()
    -    documentType= 'NDCs'
    -    if documentType in data.keys():
    -        if country_code in data[documentType].keys():
    -            get_dict = {}
    -            for key, value in data[documentType][country_code].items():
    -                if key not in ['country_name','region_id', 'region_name']:
    -                    get_dict[key] = value['classification']
    -                else:
    -                    get_dict[key] = value
    -        else:
    -            return None
    -    else:
    -        return None
    -
    -    country = {}
    -    for key in categoriesData['categories']:
    -        country[key]= {}
    -    for key,value in categoriesData['subcategories'].items():
    -        country[value['category']][key] = get_dict[key]
    -    
    -    return country
    -        
    -            
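A usage sketch of get_document (the country code is a placeholder; unknown codes return None):

country = get_document('IND')
if country is not None:
    print(country.keys())  # top-level categories, e.g. 'climate change adaptation'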
    -def countrySpecificCCA(cca_sent:dict, threshold:int, countryCode:str):
    -    """
-    Based on the country code, reads the country data from
-    https://klimalog.die-gdi.de/ndc/open-data/dataset.json
-    using get_document from utils.ndc_explorer.py,
-    then filters, by threshold value, the Climate Change Adaptation
-    targets assigned to that country by the NDC explorer team. Using the sentences
-    created by the Data services team of GIZ for each target level, it tries to find
-    the relevant passages from the document by semantic search.
    -
    -    Params
    -    -------
    -    cca_sent: dictionary with key as 'target labels' and manufactured sentences 
    -    reflecting the target level. Please see the docStore/ndcs/cca.txt
    -
-    threshold: NDC targets have categories ranging from 0-5, with 0
-    reflecting the most relaxed attitude and 5 the most aggressive towards climate
-    change. We select the threshold value beyond which we need to focus.
    -
    -    countryCode: standard country code to allow us to fetch the country specific
    -    data.
    -
    -    """
    -    temp = {}
    -    doc = get_document(countryCode)
    -    for key,value in cca_sent.items():
    -        id_ = doc['climate change adaptation'][key]['id']
    -        if id_ >threshold:
    -            temp[key] = value['id'][id_]
    -    return temp
    -
    -                
    -def countrySpecificCCM(ccm_sent, threshold, countryCode):
    -    """
-    See the documentation of countrySpecificCCA. This works the same way,
-    except that it fetches the data pertaining to Climate Change Mitigation.
    -    
    -    """
    -
    -    temp = {}
    -    doc = get_document(countryCode)
    -    for key,value in ccm_sent.items():
    -        id_ = doc['climate change mitigation'][key]['id']
    -        if id_ >threshold:
    -            temp[key] = value['id'][id_]
    -    
    -    return temp
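-
-# Hedged usage sketch (illustrative; cca_sent would come from docStore/ndcs/cca.txt
-# as described in the docstring, and the country code is hypothetical):
-# >>> passages = countrySpecificCCA(cca_sent, threshold=3, countryCode="IND")
-# >>> # keeps only the targets whose NDC Explorer level exceeds the threshold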
    diff --git a/spaces/Gen-Sim/Gen-Sim/notebooks/print_results.py b/spaces/Gen-Sim/Gen-Sim/notebooks/print_results.py
    deleted file mode 100644
    index 6ea11091fc06280f613b073f4ffe09382c1141c9..0000000000000000000000000000000000000000
    --- a/spaces/Gen-Sim/Gen-Sim/notebooks/print_results.py
    +++ /dev/null
    @@ -1,145 +0,0 @@
    -import os
    -import sys
    -import json
    -
    -from cliport import agents
    -from cliport import tasks
    -import argparse
    -import datetime
    -import matplotlib as mpl
    -
    -mpl.use("Agg")
    -import pandas as pd
    -import seaborn as sns
    -import matplotlib.pyplot as plt
    -import matplotlib
    -import IPython
    -import numpy as np
    -font = {
    -    "size": 22,
    -}
    -matplotlib.rc("font", **font)
    -sns.set_context("paper", font_scale=2.0)
    -
    -
    -def mkdir_if_missing(dst_dir):
    -    if not os.path.exists(dst_dir):
    -        os.makedirs(dst_dir)
    -
    -
    -def save_figure(name, title=""):
    -    print(f"output/output_figures/{name}.png")
    -    if len(title) > 0:
    -        plt.title(title)
    -    plt.tight_layout()
    -    mkdir_if_missing(f"output/output_figures/{name}")
    -    plt.savefig(f"output/output_figures/{name}/output.png")
    -    plt.clf()
    -
    -
    -def print_and_write(file_handle, text):
    -    print(text)
    -    if file_handle is not None:
    -        file_handle.write(text + "\n")
    -    return text
    -
    -parser = argparse.ArgumentParser()
    -
-# command-line arguments
    -parser.add_argument(
    -    "--results", "-r", type=str, default="exps/exps-singletask"
    -)
    -parser.add_argument(
    -    "--single", "-s", action="store_true", default=False
    -)
    -args = parser.parse_args()
    -
    -root_folder = os.environ['GENSIM_ROOT']
-exp_folder = os.path.join(root_folder, args.results)  # point --results at your experiments folder
    -
    -
    -mkdir_if_missing('output/output_figures')
    -mkdir_if_missing('output/cliport_output')
    -mkdir_if_missing('output/output_stat')
    -
    -
    -
    -output_stat_file = os.path.join('output/', 'cliport_output/', 'cliport-training.txt')
    -file_handle = open(output_stat_file, 'a+')
    -
    -tasks_list = list(tasks.names.keys())
    -agents_list = list(agents.names.keys())
-demos_list = [1, 5, 10, 20, 30, 50, 100, 200, 1000]
    -
    -results = {}
    -for t in tasks_list:
    -    for a in agents_list:
    -        for d in demos_list:
    -            task_folder = f'{t}-{a}-n{d}-train'
    -            task_folder_path = os.path.join(exp_folder, task_folder, 'checkpoints')
    -
    -            if os.path.exists(task_folder_path):
    -                print(f"train {task_folder_path}")
    -
    -                jsons = [f for f in os.listdir(task_folder_path) if '.json' in f]
    -                for j in jsons:
    -                    model_type = 'multi' if 'multi' in j else 'single'
    -                    eval_type = 'val' if 'val' in j else 'test'
    -                    
    -                    with open(os.path.join(task_folder_path, j)) as f:
    -                        res = json.load(f)
    -                    
    -                    results[f'{t}-{a}-n{d}-{model_type}-{eval_type}'] = res
    -
    -dt_string = datetime.datetime.now().strftime("%d_%m_%Y_%H:%M:%S")
    -print_and_write(file_handle, f"==========================={dt_string}=========================\n")
    -print_and_write(file_handle, f'Experiments folder: {exp_folder}\n')
    -
    -data = {'task': [], 'success': []}
    -
    -for eval_type in ['val', 'test']:
    -    print_and_write(file_handle, f'----- {eval_type.upper()} -----\n')
    -    for t in tasks_list:
    -        for a in agents_list:
    -            for d in demos_list:
    -                for model_type in ['single', 'multi']:
    -                    eval_key = f'{t}-{a}-n{d}-{model_type}-{eval_type}'
    -                    
    -                    if eval_key in results:    
    -                        print_and_write(file_handle, f'{eval_key} {t} | Train Demos: {d}')
    -                        res = results[eval_key]
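-                        # zip pairs each checkpoint's mean_reward with its name, so
-                        # max() over those tuples picks the highest-scoring checkpoint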
    -                        best_score, best_ckpt = max(zip([v['mean_reward'] for v in list(res.values())], res.keys()))
    -                        # TODO: test that this works for full results folder
    -                        
    -                        print_and_write(file_handle, f'\t{best_score*100:1.1f} : {a} | {model_type}\n')
    -                        data['task'].append(t)
    -                        data['success'].append(best_score)
    -
    -data['task'].append("Average")
    -data['success'].append(np.mean(data["success"]))
    -
    -
-# make a figure for the single-experiment results as well
    -dfs = []
    -suffix = ""
    -run_num = 0
    -df = pd.DataFrame.from_dict(data)
    -title = args.results + "_res"
    -
    -# rewards
    -fig, ax = plt.subplots(figsize=(16, 8))
    -sns_plot = sns.barplot(
    -    data=df, x="task", y="success", errorbar=("sd", 1), palette="deep"
    -)
    -
    -# label texts
    -for container in ax.containers:
    -    ax.bar_label(container, label_type="center", fontsize="x-large", fmt="%.2f")
    -# ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="right")
    -ax.set_xticklabels(['\n'.join(str(xlabel.get_text()).split("-")) for xlabel in ax.get_xticklabels()])
    -
    -# save plot
    -save_figure(f"{title}", title)
    diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport5_new_pickplace_demo10.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport5_new_pickplace_demo10.sh
    deleted file mode 100644
    index 7e36cd7688b092b053a693f33f96244e5a91449d..0000000000000000000000000000000000000000
    --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport5_new_pickplace_demo10.sh
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -#!/bin/bash
    -#SBATCH -c 10
    -#SBATCH -n 1
    -#SBATCH -o logs/%j.out
    -#SBATCH --exclusive
    -STEPS=${1-'50000'}
    -
    -
    -sh scripts/traintest_scripts/train_test_multi_task_goal_demo10.sh data \
    -		"[stack-block-pyramid,align-box-corner,put-block-in-bowl,packing-boxes,block-insertion,color_linked_ball_bowl_ordering,color_specific_container_fill,insert_blocks_into_fixture,sort_insert_color_coordinated_blocks,color_ordered_blocks_on_pallet,color-coordinated-sphere-insertion,rainbow-stack,put-block-in-bowl,vertical-insertion-blocks,stack-blocks-in-container]" \
    -		"[stack-block-pyramid,put-block-in-bowl,align-box-corner,packing-boxes,block-insertion]" \
    -		gpt10_mixcliport5_task_new 
    diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/preprocess.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/preprocess.py
    deleted file mode 100644
    index cde325c4163d6800404de214202d773addfff296..0000000000000000000000000000000000000000
    --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/preprocess.py
    +++ /dev/null
    @@ -1,259 +0,0 @@
    -from multiprocessing.pool import Pool 
    -from synthesizer import audio
    -from functools import partial
    -from itertools import chain
    -from encoder import inference as encoder
    -from pathlib import Path
    -from utils import logmmse
    -from tqdm import tqdm
    -import numpy as np
    -import librosa
    -
    -
    -def preprocess_dataset(datasets_root: Path, out_dir: Path, n_processes: int,
    -                           skip_existing: bool, hparams, no_alignments: bool,
    -                           datasets_name: str, subfolders: str):
    -    # Gather the input directories
    -    dataset_root = datasets_root.joinpath(datasets_name)
    -    input_dirs = [dataset_root.joinpath(subfolder.strip()) for subfolder in subfolders.split(",")]
    -    print("\n    ".join(map(str, ["Using data from:"] + input_dirs)))
    -    assert all(input_dir.exists() for input_dir in input_dirs)
    -    
    -    # Create the output directories for each output file type
    -    out_dir.joinpath("mels").mkdir(exist_ok=True)
    -    out_dir.joinpath("audio").mkdir(exist_ok=True)
    -    
    -    # Create a metadata file
    -    metadata_fpath = out_dir.joinpath("train.txt")
    -    metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8")
    -
    -    # Preprocess the dataset
    -    speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs))
    -    func = partial(preprocess_speaker, out_dir=out_dir, skip_existing=skip_existing, 
    -                   hparams=hparams, no_alignments=no_alignments)
    -    job = Pool(n_processes).imap(func, speaker_dirs)
    -    for speaker_metadata in tqdm(job, datasets_name, len(speaker_dirs), unit="speakers"):
    -        for metadatum in speaker_metadata:
    -            metadata_file.write("|".join(str(x) for x in metadatum) + "\n")
    -    metadata_file.close()
    -
    -    # Verify the contents of the metadata file
    -    with metadata_fpath.open("r", encoding="utf-8") as metadata_file:
    -        metadata = [line.split("|") for line in metadata_file]
    -    mel_frames = sum([int(m[4]) for m in metadata])
    -    timesteps = sum([int(m[3]) for m in metadata])
    -    sample_rate = hparams.sample_rate
    -    hours = (timesteps / sample_rate) / 3600
    -    print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." %
    -          (len(metadata), mel_frames, timesteps, hours))
    -    print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata))
    -    print("Max mel frames length: %d" % max(int(m[4]) for m in metadata))
    -    print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata))
    -
    -
    -def preprocess_speaker(speaker_dir, out_dir: Path, skip_existing: bool, hparams, no_alignments: bool):
    -    metadata = []
    -    for book_dir in speaker_dir.glob("*"):
    -        if no_alignments:
    -            # Gather the utterance audios and texts
    -            # LibriTTS uses .wav but we will include extensions for compatibility with other datasets
    -            extensions = ["*.wav", "*.flac", "*.mp3"]
    -            for extension in extensions:
    -                wav_fpaths = book_dir.glob(extension)
    -
    -                for wav_fpath in wav_fpaths:
    -                    # Load the audio waveform
-                    wav, _ = librosa.load(str(wav_fpath), sr=hparams.sample_rate)
    -                    if hparams.rescale:
    -                        wav = wav / np.abs(wav).max() * hparams.rescaling_max
    -
    -                    # Get the corresponding text
    -                    # Check for .txt (for compatibility with other datasets)
    -                    text_fpath = wav_fpath.with_suffix(".txt")
    -                    if not text_fpath.exists():
    -                        # Check for .normalized.txt (LibriTTS)
    -                        text_fpath = wav_fpath.with_suffix(".normalized.txt")
    -                        assert text_fpath.exists()
    -                    with text_fpath.open("r") as text_file:
    -                        text = "".join([line for line in text_file])
    -                        text = text.replace("\"", "")
    -                        text = text.strip()
    -
    -                    # Process the utterance
    -                    metadata.append(process_utterance(wav, text, out_dir, str(wav_fpath.with_suffix("").name),
    -                                                      skip_existing, hparams))
    -        else:
    -            # Process alignment file (LibriSpeech support)
    -            # Gather the utterance audios and texts
    -            try:
    -                alignments_fpath = next(book_dir.glob("*.alignment.txt"))
    -                with alignments_fpath.open("r") as alignments_file:
    -                    alignments = [line.rstrip().split(" ") for line in alignments_file]
    -            except StopIteration:
    -                # A few alignment files will be missing
    -                continue
    -
    -            # Iterate over each entry in the alignments file
    -            for wav_fname, words, end_times in alignments:
    -                wav_fpath = book_dir.joinpath(wav_fname + ".flac")
    -                assert wav_fpath.exists()
    -                words = words.replace("\"", "").split(",")
    -                end_times = list(map(float, end_times.replace("\"", "").split(",")))
    -
    -                # Process each sub-utterance
    -                wavs, texts = split_on_silences(wav_fpath, words, end_times, hparams)
    -                for i, (wav, text) in enumerate(zip(wavs, texts)):
    -                    sub_basename = "%s_%02d" % (wav_fname, i)
    -                    metadata.append(process_utterance(wav, text, out_dir, sub_basename,
    -                                                      skip_existing, hparams))
    -
    -    return [m for m in metadata if m is not None]
    -
    -
    -def split_on_silences(wav_fpath, words, end_times, hparams):
    -    # Load the audio waveform
-    wav, _ = librosa.load(str(wav_fpath), sr=hparams.sample_rate)
    -    if hparams.rescale:
    -        wav = wav / np.abs(wav).max() * hparams.rescaling_max
    -    
    -    words = np.array(words)
    -    start_times = np.array([0.0] + end_times[:-1])
    -    end_times = np.array(end_times)
    -    assert len(words) == len(end_times) == len(start_times)
    -    assert words[0] == "" and words[-1] == ""
    -    
    -    # Find pauses that are too long
    -    mask = (words == "") & (end_times - start_times >= hparams.silence_min_duration_split)
    -    mask[0] = mask[-1] = True
    -    breaks = np.where(mask)[0]
    -
    -    # Profile the noise from the silences and perform noise reduction on the waveform
    -    silence_times = [[start_times[i], end_times[i]] for i in breaks]
-    silence_times = (np.array(silence_times) * hparams.sample_rate).astype(int)  # np.int was removed in NumPy 1.24
    -    noisy_wav = np.concatenate([wav[stime[0]:stime[1]] for stime in silence_times])
    -    if len(noisy_wav) > hparams.sample_rate * 0.02:
    -        profile = logmmse.profile_noise(noisy_wav, hparams.sample_rate)
    -        wav = logmmse.denoise(wav, profile, eta=0)
    -    
    -    # Re-attach segments that are too short
    -    segments = list(zip(breaks[:-1], breaks[1:]))
    -    segment_durations = [start_times[end] - end_times[start] for start, end in segments]
    -    i = 0
    -    while i < len(segments) and len(segments) > 1:
    -        if segment_durations[i] < hparams.utterance_min_duration:
    -            # See if the segment can be re-attached with the right or the left segment
    -            left_duration = float("inf") if i == 0 else segment_durations[i - 1]
    -            right_duration = float("inf") if i == len(segments) - 1 else segment_durations[i + 1]
    -            joined_duration = segment_durations[i] + min(left_duration, right_duration)
    -
    -            # Do not re-attach if it causes the joined utterance to be too long
    -            if joined_duration > hparams.hop_size * hparams.max_mel_frames / hparams.sample_rate:
    -                i += 1
    -                continue
    -
    -            # Re-attach the segment with the neighbour of shortest duration
    -            j = i - 1 if left_duration <= right_duration else i
    -            segments[j] = (segments[j][0], segments[j + 1][1])
    -            segment_durations[j] = joined_duration
    -            del segments[j + 1], segment_durations[j + 1]
    -        else:
    -            i += 1
    -    
    -    # Split the utterance
    -    segment_times = [[end_times[start], start_times[end]] for start, end in segments]
-    segment_times = (np.array(segment_times) * hparams.sample_rate).astype(int)  # np.int was removed in NumPy 1.24
    -    wavs = [wav[segment_time[0]:segment_time[1]] for segment_time in segment_times]
    -    texts = [" ".join(words[start + 1:end]).replace("  ", " ") for start, end in segments]
    -    
    -    # # DEBUG: play the audio segments (run with -n=1)
    -    # import sounddevice as sd
    -    # if len(wavs) > 1:
    -    #     print("This sentence was split in %d segments:" % len(wavs))
    -    # else:
    -    #     print("There are no silences long enough for this sentence to be split:")
    -    # for wav, text in zip(wavs, texts):
    -    #     # Pad the waveform with 1 second of silence because sounddevice tends to cut them early
    -    #     # when playing them. You shouldn't need to do that in your parsers.
    -    #     wav = np.concatenate((wav, [0] * 16000))
    -    #     print("\t%s" % text)
    -    #     sd.play(wav, 16000, blocking=True)
    -    # print("")
    -    
    -    return wavs, texts
    -    
    -    
    -def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str, 
    -                      skip_existing: bool, hparams):
    -    ## FOR REFERENCE:
    -    # For you not to lose your head if you ever wish to change things here or implement your own
    -    # synthesizer.
    -    # - Both the audios and the mel spectrograms are saved as numpy arrays
    -    # - There is no processing done to the audios that will be saved to disk beyond volume  
    -    #   normalization (in split_on_silences)
    -    # - However, pre-emphasis is applied to the audios before computing the mel spectrogram. This
    -    #   is why we re-apply it on the audio on the side of the vocoder.
    -    # - Librosa pads the waveform before computing the mel spectrogram. Here, the waveform is saved
    -    #   without extra padding. This means that you won't have an exact relation between the length
    -    #   of the wav and of the mel spectrogram. See the vocoder data loader.
    -    
    -    
    -    # Skip existing utterances if needed
    -    mel_fpath = out_dir.joinpath("mels", "mel-%s.npy" % basename)
    -    wav_fpath = out_dir.joinpath("audio", "audio-%s.npy" % basename)
    -    if skip_existing and mel_fpath.exists() and wav_fpath.exists():
    -        return None
    -
    -    # Trim silence
    -    if hparams.trim_silence:
    -        wav = encoder.preprocess_wav(wav, normalize=False, trim_silence=True)
    -    
    -    # Skip utterances that are too short
    -    if len(wav) < hparams.utterance_min_duration * hparams.sample_rate:
    -        return None
    -    
    -    # Compute the mel spectrogram
    -    mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)
    -    mel_frames = mel_spectrogram.shape[1]
    -    
    -    # Skip utterances that are too long
    -    if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:
    -        return None
    -    
    -    # Write the spectrogram, embed and audio to disk
    -    np.save(mel_fpath, mel_spectrogram.T, allow_pickle=False)
    -    np.save(wav_fpath, wav, allow_pickle=False)
    -    
    -    # Return a tuple describing this training example
    -    return wav_fpath.name, mel_fpath.name, "embed-%s.npy" % basename, len(wav), mel_frames, text
    - 
    - 
    -def embed_utterance(fpaths, encoder_model_fpath):
    -    if not encoder.is_loaded():
    -        encoder.load_model(encoder_model_fpath)
    -
    -    # Compute the speaker embedding of the utterance
    -    wav_fpath, embed_fpath = fpaths
    -    wav = np.load(wav_fpath)
    -    wav = encoder.preprocess_wav(wav)
    -    embed = encoder.embed_utterance(wav)
    -    np.save(embed_fpath, embed, allow_pickle=False)
    -    
    - 
    -def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int):
    -    wav_dir = synthesizer_root.joinpath("audio")
    -    metadata_fpath = synthesizer_root.joinpath("train.txt")
    -    assert wav_dir.exists() and metadata_fpath.exists()
    -    embed_dir = synthesizer_root.joinpath("embeds")
    -    embed_dir.mkdir(exist_ok=True)
    -    
    -    # Gather the input wave filepath and the target output embed filepath
    -    with metadata_fpath.open("r") as metadata_file:
    -        metadata = [line.split("|") for line in metadata_file]
    -        fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata]
    -        
    -    # TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here.
    -    # Embed the utterances in separate threads
    -    func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath)
    -    job = Pool(n_processes).imap(func, fpaths)
    -    list(tqdm(job, "Embedding", len(fpaths), unit="utterances"))
    -    
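-
-# Hedged usage sketch (illustrative; the paths are assumptions):
-# >>> create_embeddings(Path("synth_out"),
-# ...                   Path("encoder/saved_models/pretrained.pt"), n_processes=4)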
    diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py
    deleted file mode 100644
    index 5ae3e48110e61231acf1e666e5fa76af5e4ebdcd..0000000000000000000000000000000000000000
    --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py
    +++ /dev/null
    @@ -1,45 +0,0 @@
    -import torch
    -
    -
    -_output_ref = None
    -_replicas_ref = None
    -
    -def data_parallel_workaround(model, *input):
    -    global _output_ref
    -    global _replicas_ref
    -    device_ids = list(range(torch.cuda.device_count()))
    -    output_device = device_ids[0]
    -    replicas = torch.nn.parallel.replicate(model, device_ids)
    -    # input.shape = (num_args, batch, ...)
    -    inputs = torch.nn.parallel.scatter(input, device_ids)
    -    # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...)
    -    replicas = replicas[:len(inputs)]
    -    outputs = torch.nn.parallel.parallel_apply(replicas, inputs)
    -    y_hat = torch.nn.parallel.gather(outputs, output_device)
    -    _output_ref = outputs
    -    _replicas_ref = replicas
    -    return y_hat
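-
-# Hedged usage sketch (illustrative argument names): scatters the batch across
-# all visible GPUs and gathers the outputs back on device 0.
-# >>> y_hat = data_parallel_workaround(model, texts, mels, embeds)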
    -
    -
    -class ValueWindow():
    -  def __init__(self, window_size=100):
    -    self._window_size = window_size
    -    self._values = []
    -
    -  def append(self, x):
    -    self._values = self._values[-(self._window_size - 1):] + [x]
    -
    -  @property
    -  def sum(self):
    -    return sum(self._values)
    -
    -  @property
    -  def count(self):
    -    return len(self._values)
    -
    -  @property
    -  def average(self):
    -    return self.sum / max(1, self.count)
    -
    -  def reset(self):
    -    self._values = []
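-
-# Hedged usage sketch (illustrative): a rolling window over recent values,
-# handy for smoothing loss curves during training.
-# >>> loss_window = ValueWindow(100)
-# >>> loss_window.append(0.5); loss_window.append(0.3)
-# >>> loss_window.average   # -> 0.4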
    diff --git a/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/models_onnx.py b/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/models_onnx.py
    deleted file mode 100644
    index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000
    --- a/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/models_onnx.py
    +++ /dev/null
    @@ -1,819 +0,0 @@
-import math
    -import torch
    -from torch import nn
    -from torch.nn import functional as F
    -from lib.infer_pack import modules
    -from lib.infer_pack import attentions
    -from lib.infer_pack import commons
    -from lib.infer_pack.commons import init_weights, get_padding
    -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
    -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-import numpy as np
    -
    -
    -class TextEncoder256(nn.Module):
    -    def __init__(
    -        self,
    -        out_channels,
    -        hidden_channels,
    -        filter_channels,
    -        n_heads,
    -        n_layers,
    -        kernel_size,
    -        p_dropout,
    -        f0=True,
    -    ):
    -        super().__init__()
    -        self.out_channels = out_channels
    -        self.hidden_channels = hidden_channels
    -        self.filter_channels = filter_channels
    -        self.n_heads = n_heads
    -        self.n_layers = n_layers
    -        self.kernel_size = kernel_size
    -        self.p_dropout = p_dropout
    -        self.emb_phone = nn.Linear(256, hidden_channels)
    -        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
-        if f0:
    -            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
    -        self.encoder = attentions.Encoder(
    -            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
    -        )
    -        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
    -
    -    def forward(self, phone, pitch, lengths):
-        if pitch is None:
    -            x = self.emb_phone(phone)
    -        else:
    -            x = self.emb_phone(phone) + self.emb_pitch(pitch)
    -        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
    -        x = self.lrelu(x)
    -        x = torch.transpose(x, 1, -1)  # [b, h, t]
    -        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
    -            x.dtype
    -        )
    -        x = self.encoder(x * x_mask, x_mask)
    -        stats = self.proj(x) * x_mask
    -
    -        m, logs = torch.split(stats, self.out_channels, dim=1)
    -        return m, logs, x_mask
    -
    -
    -class TextEncoder768(nn.Module):
    -    def __init__(
    -        self,
    -        out_channels,
    -        hidden_channels,
    -        filter_channels,
    -        n_heads,
    -        n_layers,
    -        kernel_size,
    -        p_dropout,
    -        f0=True,
    -    ):
    -        super().__init__()
    -        self.out_channels = out_channels
    -        self.hidden_channels = hidden_channels
    -        self.filter_channels = filter_channels
    -        self.n_heads = n_heads
    -        self.n_layers = n_layers
    -        self.kernel_size = kernel_size
    -        self.p_dropout = p_dropout
    -        self.emb_phone = nn.Linear(768, hidden_channels)
    -        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
-        if f0:
    -            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
    -        self.encoder = attentions.Encoder(
    -            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
    -        )
    -        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
    -
    -    def forward(self, phone, pitch, lengths):
-        if pitch is None:
    -            x = self.emb_phone(phone)
    -        else:
    -            x = self.emb_phone(phone) + self.emb_pitch(pitch)
    -        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
    -        x = self.lrelu(x)
    -        x = torch.transpose(x, 1, -1)  # [b, h, t]
    -        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
    -            x.dtype
    -        )
    -        x = self.encoder(x * x_mask, x_mask)
    -        stats = self.proj(x) * x_mask
    -
    -        m, logs = torch.split(stats, self.out_channels, dim=1)
    -        return m, logs, x_mask
    -
    -
    -class ResidualCouplingBlock(nn.Module):
    -    def __init__(
    -        self,
    -        channels,
    -        hidden_channels,
    -        kernel_size,
    -        dilation_rate,
    -        n_layers,
    -        n_flows=4,
    -        gin_channels=0,
    -    ):
    -        super().__init__()
    -        self.channels = channels
    -        self.hidden_channels = hidden_channels
    -        self.kernel_size = kernel_size
    -        self.dilation_rate = dilation_rate
    -        self.n_layers = n_layers
    -        self.n_flows = n_flows
    -        self.gin_channels = gin_channels
    -
    -        self.flows = nn.ModuleList()
    -        for i in range(n_flows):
    -            self.flows.append(
    -                modules.ResidualCouplingLayer(
    -                    channels,
    -                    hidden_channels,
    -                    kernel_size,
    -                    dilation_rate,
    -                    n_layers,
    -                    gin_channels=gin_channels,
    -                    mean_only=True,
    -                )
    -            )
    -            self.flows.append(modules.Flip())
    -
    -    def forward(self, x, x_mask, g=None, reverse=False):
    -        if not reverse:
    -            for flow in self.flows:
    -                x, _ = flow(x, x_mask, g=g, reverse=reverse)
    -        else:
    -            for flow in reversed(self.flows):
    -                x = flow(x, x_mask, g=g, reverse=reverse)
    -        return x
    -
    -    def remove_weight_norm(self):
    -        for i in range(self.n_flows):
    -            self.flows[i * 2].remove_weight_norm()
    -
    -
    -class PosteriorEncoder(nn.Module):
    -    def __init__(
    -        self,
    -        in_channels,
    -        out_channels,
    -        hidden_channels,
    -        kernel_size,
    -        dilation_rate,
    -        n_layers,
    -        gin_channels=0,
    -    ):
    -        super().__init__()
    -        self.in_channels = in_channels
    -        self.out_channels = out_channels
    -        self.hidden_channels = hidden_channels
    -        self.kernel_size = kernel_size
    -        self.dilation_rate = dilation_rate
    -        self.n_layers = n_layers
    -        self.gin_channels = gin_channels
    -
    -        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
    -        self.enc = modules.WN(
    -            hidden_channels,
    -            kernel_size,
    -            dilation_rate,
    -            n_layers,
    -            gin_channels=gin_channels,
    -        )
    -        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
    -
    -    def forward(self, x, x_lengths, g=None):
    -        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
    -            x.dtype
    -        )
    -        x = self.pre(x) * x_mask
    -        x = self.enc(x, x_mask, g=g)
    -        stats = self.proj(x) * x_mask
    -        m, logs = torch.split(stats, self.out_channels, dim=1)
    -        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
    -        return z, m, logs, x_mask
    -
    -    def remove_weight_norm(self):
    -        self.enc.remove_weight_norm()
    -
    -
    -class Generator(torch.nn.Module):
    -    def __init__(
    -        self,
    -        initial_channel,
    -        resblock,
    -        resblock_kernel_sizes,
    -        resblock_dilation_sizes,
    -        upsample_rates,
    -        upsample_initial_channel,
    -        upsample_kernel_sizes,
    -        gin_channels=0,
    -    ):
    -        super(Generator, self).__init__()
    -        self.num_kernels = len(resblock_kernel_sizes)
    -        self.num_upsamples = len(upsample_rates)
    -        self.conv_pre = Conv1d(
    -            initial_channel, upsample_initial_channel, 7, 1, padding=3
    -        )
    -        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
    -
    -        self.ups = nn.ModuleList()
    -        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
    -            self.ups.append(
    -                weight_norm(
    -                    ConvTranspose1d(
    -                        upsample_initial_channel // (2**i),
    -                        upsample_initial_channel // (2 ** (i + 1)),
    -                        k,
    -                        u,
    -                        padding=(k - u) // 2,
    -                    )
    -                )
    -            )
    -
    -        self.resblocks = nn.ModuleList()
    -        for i in range(len(self.ups)):
    -            ch = upsample_initial_channel // (2 ** (i + 1))
    -            for j, (k, d) in enumerate(
    -                zip(resblock_kernel_sizes, resblock_dilation_sizes)
    -            ):
    -                self.resblocks.append(resblock(ch, k, d))
    -
    -        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
    -        self.ups.apply(init_weights)
    -
    -        if gin_channels != 0:
    -            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
    -
    -    def forward(self, x, g=None):
    -        x = self.conv_pre(x)
    -        if g is not None:
    -            x = x + self.cond(g)
    -
    -        for i in range(self.num_upsamples):
    -            x = F.leaky_relu(x, modules.LRELU_SLOPE)
    -            x = self.ups[i](x)
    -            xs = None
    -            for j in range(self.num_kernels):
    -                if xs is None:
    -                    xs = self.resblocks[i * self.num_kernels + j](x)
    -                else:
    -                    xs += self.resblocks[i * self.num_kernels + j](x)
    -            x = xs / self.num_kernels
    -        x = F.leaky_relu(x)
    -        x = self.conv_post(x)
    -        x = torch.tanh(x)
    -
    -        return x
    -
    -    def remove_weight_norm(self):
    -        for l in self.ups:
    -            remove_weight_norm(l)
    -        for l in self.resblocks:
    -            l.remove_weight_norm()
    -
    -
    -class SineGen(torch.nn.Module):
    -    """Definition of sine generator
    -    SineGen(samp_rate, harmonic_num = 0,
    -            sine_amp = 0.1, noise_std = 0.003,
    -            voiced_threshold = 0,
    -            flag_for_pulse=False)
    -    samp_rate: sampling rate in Hz
    -    harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of the sine waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    -    Note: when flag_for_pulse is True, the first time step of a voiced
    -        segment is always sin(np.pi) or cos(0)
    -    """
    -
    -    def __init__(
    -        self,
    -        samp_rate,
    -        harmonic_num=0,
    -        sine_amp=0.1,
    -        noise_std=0.003,
    -        voiced_threshold=0,
    -        flag_for_pulse=False,
    -    ):
    -        super(SineGen, self).__init__()
    -        self.sine_amp = sine_amp
    -        self.noise_std = noise_std
    -        self.harmonic_num = harmonic_num
    -        self.dim = self.harmonic_num + 1
    -        self.sampling_rate = samp_rate
    -        self.voiced_threshold = voiced_threshold
    -
    -    def _f02uv(self, f0):
    -        # generate uv signal
    -        uv = torch.ones_like(f0)
    -        uv = uv * (f0 > self.voiced_threshold)
    -        return uv
    -
    -    def forward(self, f0, upp):
    -        """sine_tensor, uv = forward(f0)
    -        input F0: tensor(batchsize=1, length, dim=1)
    -                  f0 for unvoiced steps should be 0
    -        output sine_tensor: tensor(batchsize=1, length, dim)
    -        output uv: tensor(batchsize=1, length, 1)
    -        """
    -        with torch.no_grad():
    -            f0 = f0[:, None].transpose(1, 2)
    -            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
    -            # fundamental component
    -            f0_buf[:, :, 0] = f0[:, :, 0]
    -            for idx in np.arange(self.harmonic_num):
    -                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
    -                    idx + 2
    -                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  # taking % 1 means the n_har products cannot be optimized in post-processing
    -            rand_ini = torch.rand(
    -                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
    -            )
    -            rand_ini[:, 0] = 0
    -            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # applying % 1 here would prevent the cumsum below from being optimized further
    -            tmp_over_one *= upp
    -            tmp_over_one = F.interpolate(
    -                tmp_over_one.transpose(2, 1),
    -                scale_factor=upp,
    -                mode="linear",
    -                align_corners=True,
    -            ).transpose(2, 1)
-            rad_values = F.interpolate(
-                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
-            ).transpose(2, 1)
    -            tmp_over_one %= 1
    -            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
    -            cumsum_shift = torch.zeros_like(rad_values)
    -            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
    -            sine_waves = torch.sin(
    -                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
    -            )
    -            sine_waves = sine_waves * self.sine_amp
    -            uv = self._f02uv(f0)
    -            uv = F.interpolate(
    -                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
    -            ).transpose(2, 1)
    -            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
    -            noise = noise_amp * torch.randn_like(sine_waves)
    -            sine_waves = sine_waves * uv + noise
    -        return sine_waves, uv, noise
    -
    -
    -class SourceModuleHnNSF(torch.nn.Module):
    -    """SourceModule for hn-nsf
    -    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
    -                 add_noise_std=0.003, voiced_threshod=0)
    -    sampling_rate: sampling_rate in Hz
    -    harmonic_num: number of harmonic above F0 (default: 0)
    -    sine_amp: amplitude of sine source signal (default: 0.1)
-    add_noise_std: std of additive Gaussian noise (default: 0.003)
-        note that the amplitude of the noise in unvoiced segments is decided
-        by sine_amp
-    voiced_threshold: threshold to set U/V given F0 (default: 0)
    -    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    -    F0_sampled (batchsize, length, 1)
    -    Sine_source (batchsize, length, 1)
-    noise_source (batchsize, length, 1)
    -    uv (batchsize, length, 1)
    -    """
    -
    -    def __init__(
    -        self,
    -        sampling_rate,
    -        harmonic_num=0,
    -        sine_amp=0.1,
    -        add_noise_std=0.003,
    -        voiced_threshod=0,
    -        is_half=True,
    -    ):
    -        super(SourceModuleHnNSF, self).__init__()
    -
    -        self.sine_amp = sine_amp
    -        self.noise_std = add_noise_std
    -        self.is_half = is_half
    -        # to produce sine waveforms
    -        self.l_sin_gen = SineGen(
    -            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
    -        )
    -
    -        # to merge source harmonics into a single excitation
    -        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
    -        self.l_tanh = torch.nn.Tanh()
    -
    -    def forward(self, x, upp=None):
    -        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
    -        if self.is_half:
    -            sine_wavs = sine_wavs.half()
    -        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
    -        return sine_merge, None, None  # noise, uv
    -
    -
    -class GeneratorNSF(torch.nn.Module):
    -    def __init__(
    -        self,
    -        initial_channel,
    -        resblock,
    -        resblock_kernel_sizes,
    -        resblock_dilation_sizes,
    -        upsample_rates,
    -        upsample_initial_channel,
    -        upsample_kernel_sizes,
    -        gin_channels,
    -        sr,
    -        is_half=False,
    -    ):
    -        super(GeneratorNSF, self).__init__()
    -        self.num_kernels = len(resblock_kernel_sizes)
    -        self.num_upsamples = len(upsample_rates)
    -
    -        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
    -        self.m_source = SourceModuleHnNSF(
    -            sampling_rate=sr, harmonic_num=0, is_half=is_half
    -        )
    -        self.noise_convs = nn.ModuleList()
    -        self.conv_pre = Conv1d(
    -            initial_channel, upsample_initial_channel, 7, 1, padding=3
    -        )
    -        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
    -
    -        self.ups = nn.ModuleList()
    -        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
    -            c_cur = upsample_initial_channel // (2 ** (i + 1))
    -            self.ups.append(
    -                weight_norm(
    -                    ConvTranspose1d(
    -                        upsample_initial_channel // (2**i),
    -                        upsample_initial_channel // (2 ** (i + 1)),
    -                        k,
    -                        u,
    -                        padding=(k - u) // 2,
    -                    )
    -                )
    -            )
    -            if i + 1 < len(upsample_rates):
    -                stride_f0 = np.prod(upsample_rates[i + 1 :])
    -                self.noise_convs.append(
    -                    Conv1d(
    -                        1,
    -                        c_cur,
    -                        kernel_size=stride_f0 * 2,
    -                        stride=stride_f0,
    -                        padding=stride_f0 // 2,
    -                    )
    -                )
    -            else:
    -                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
    -
    -        self.resblocks = nn.ModuleList()
    -        for i in range(len(self.ups)):
    -            ch = upsample_initial_channel // (2 ** (i + 1))
    -            for j, (k, d) in enumerate(
    -                zip(resblock_kernel_sizes, resblock_dilation_sizes)
    -            ):
    -                self.resblocks.append(resblock(ch, k, d))
    -
    -        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
    -        self.ups.apply(init_weights)
    -
    -        if gin_channels != 0:
    -            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
    -
    -        self.upp = np.prod(upsample_rates)
    -
    -    def forward(self, x, f0, g=None):
    -        har_source, noi_source, uv = self.m_source(f0, self.upp)
    -        har_source = har_source.transpose(1, 2)
    -        x = self.conv_pre(x)
    -        if g is not None:
    -            x = x + self.cond(g)
    -
    -        for i in range(self.num_upsamples):
    -            x = F.leaky_relu(x, modules.LRELU_SLOPE)
    -            x = self.ups[i](x)
    -            x_source = self.noise_convs[i](har_source)
    -            x = x + x_source
    -            xs = None
    -            for j in range(self.num_kernels):
    -                if xs is None:
    -                    xs = self.resblocks[i * self.num_kernels + j](x)
    -                else:
    -                    xs += self.resblocks[i * self.num_kernels + j](x)
    -            x = xs / self.num_kernels
    -        x = F.leaky_relu(x)
    -        x = self.conv_post(x)
    -        x = torch.tanh(x)
    -        return x
    -
    -    def remove_weight_norm(self):
    -        for l in self.ups:
    -            remove_weight_norm(l)
    -        for l in self.resblocks:
    -            l.remove_weight_norm()
    -
    -
    -sr2sr = {
    -    "32k": 32000,
    -    "40k": 40000,
    -    "48k": 48000,
    -}
    -
    -
    -class SynthesizerTrnMsNSFsidM(nn.Module):
    -    def __init__(
    -        self,
    -        spec_channels,
    -        segment_size,
    -        inter_channels,
    -        hidden_channels,
    -        filter_channels,
    -        n_heads,
    -        n_layers,
    -        kernel_size,
    -        p_dropout,
    -        resblock,
    -        resblock_kernel_sizes,
    -        resblock_dilation_sizes,
    -        upsample_rates,
    -        upsample_initial_channel,
    -        upsample_kernel_sizes,
    -        spk_embed_dim,
    -        gin_channels,
    -        sr,
    -        version,
    -        **kwargs
    -    ):
    -        super().__init__()
-        if isinstance(sr, str):
    -            sr = sr2sr[sr]
    -        self.spec_channels = spec_channels
    -        self.inter_channels = inter_channels
    -        self.hidden_channels = hidden_channels
    -        self.filter_channels = filter_channels
    -        self.n_heads = n_heads
    -        self.n_layers = n_layers
    -        self.kernel_size = kernel_size
    -        self.p_dropout = p_dropout
    -        self.resblock = resblock
    -        self.resblock_kernel_sizes = resblock_kernel_sizes
    -        self.resblock_dilation_sizes = resblock_dilation_sizes
    -        self.upsample_rates = upsample_rates
    -        self.upsample_initial_channel = upsample_initial_channel
    -        self.upsample_kernel_sizes = upsample_kernel_sizes
    -        self.segment_size = segment_size
    -        self.gin_channels = gin_channels
    -        # self.hop_length = hop_length#
    -        self.spk_embed_dim = spk_embed_dim
    -        if version == "v1":
    -            self.enc_p = TextEncoder256(
    -                inter_channels,
    -                hidden_channels,
    -                filter_channels,
    -                n_heads,
    -                n_layers,
    -                kernel_size,
    -                p_dropout,
    -            )
    -        else:
    -            self.enc_p = TextEncoder768(
    -                inter_channels,
    -                hidden_channels,
    -                filter_channels,
    -                n_heads,
    -                n_layers,
    -                kernel_size,
    -                p_dropout,
    -            )
    -        self.dec = GeneratorNSF(
    -            inter_channels,
    -            resblock,
    -            resblock_kernel_sizes,
    -            resblock_dilation_sizes,
    -            upsample_rates,
    -            upsample_initial_channel,
    -            upsample_kernel_sizes,
    -            gin_channels=gin_channels,
    -            sr=sr,
    -            is_half=kwargs["is_half"],
    -        )
    -        self.enc_q = PosteriorEncoder(
    -            spec_channels,
    -            inter_channels,
    -            hidden_channels,
    -            5,
    -            1,
    -            16,
    -            gin_channels=gin_channels,
    -        )
    -        self.flow = ResidualCouplingBlock(
    -            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
    -        )
    -        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
    -        self.speaker_map = None
    -        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
    -
    -    def remove_weight_norm(self):
    -        self.dec.remove_weight_norm()
    -        self.flow.remove_weight_norm()
    -        self.enc_q.remove_weight_norm()
    -
    -    def construct_spkmixmap(self, n_speaker):
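-        # Comment added for clarity: caches every speaker embedding in a
-        # [1, n_speaker, 1, 1, gin_channels] tensor so forward() can blend
-        # speakers with a weighted sum over this map.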
    -        self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
    -        for i in range(n_speaker):
    -            self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
    -        self.speaker_map = self.speaker_map.unsqueeze(0)
    -
    -    def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
    -        if self.speaker_map is not None:  # [N, S]  *  [S, B, 1, H]
    -            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
    -            g = g * self.speaker_map  # [N, S, B, 1, H]
    -            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
    -            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
    -        else:
    -            g = g.unsqueeze(0)
    -            g = self.emb_g(g).transpose(1, 2)
    -
    -        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
    -        z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
    -        z = self.flow(z_p, x_mask, g=g, reverse=True)
    -        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
    -        return o
    -
    -
    -class MultiPeriodDiscriminator(torch.nn.Module):
    -    def __init__(self, use_spectral_norm=False):
    -        super(MultiPeriodDiscriminator, self).__init__()
    -        periods = [2, 3, 5, 7, 11, 17]
    -        # periods = [3, 5, 7, 11, 17, 23, 37]
    -
    -        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
    -        discs = discs + [
    -            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
    -        ]
    -        self.discriminators = nn.ModuleList(discs)
    -
    -    def forward(self, y, y_hat):
    -        y_d_rs = []  #
    -        y_d_gs = []
    -        fmap_rs = []
    -        fmap_gs = []
    -        for i, d in enumerate(self.discriminators):
    -            y_d_r, fmap_r = d(y)
    -            y_d_g, fmap_g = d(y_hat)
    -            # for j in range(len(fmap_r)):
    -            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
    -            y_d_rs.append(y_d_r)
    -            y_d_gs.append(y_d_g)
    -            fmap_rs.append(fmap_r)
    -            fmap_gs.append(fmap_g)
    -
    -        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
    -
    -
    -class MultiPeriodDiscriminatorV2(torch.nn.Module):
    -    def __init__(self, use_spectral_norm=False):
    -        super(MultiPeriodDiscriminatorV2, self).__init__()
    -        # periods = [2, 3, 5, 7, 11, 17]
    -        periods = [2, 3, 5, 7, 11, 17, 23, 37]
    -
    -        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
    -        discs = discs + [
    -            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
    -        ]
    -        self.discriminators = nn.ModuleList(discs)
    -
    -    def forward(self, y, y_hat):
    -        y_d_rs = []  #
    -        y_d_gs = []
    -        fmap_rs = []
    -        fmap_gs = []
    -        for i, d in enumerate(self.discriminators):
    -            y_d_r, fmap_r = d(y)
    -            y_d_g, fmap_g = d(y_hat)
    -            # for j in range(len(fmap_r)):
    -            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
    -            y_d_rs.append(y_d_r)
    -            y_d_gs.append(y_d_g)
    -            fmap_rs.append(fmap_r)
    -            fmap_gs.append(fmap_g)
    -
    -        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
    -
    -
    -class DiscriminatorS(torch.nn.Module):
    -    def __init__(self, use_spectral_norm=False):
    -        super(DiscriminatorS, self).__init__()
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
    -        self.convs = nn.ModuleList(
    -            [
    -                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
    -                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
    -                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
    -                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
    -                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
    -                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
    -            ]
    -        )
    -        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
    -
    -    def forward(self, x):
    -        fmap = []
    -
    -        for l in self.convs:
    -            x = l(x)
    -            x = F.leaky_relu(x, modules.LRELU_SLOPE)
    -            fmap.append(x)
    -        x = self.conv_post(x)
    -        fmap.append(x)
    -        x = torch.flatten(x, 1, -1)
    -
    -        return x, fmap
    -
    -
    -class DiscriminatorP(torch.nn.Module):
    -    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
    -        super(DiscriminatorP, self).__init__()
    -        self.period = period
    -        self.use_spectral_norm = use_spectral_norm
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
    -        self.convs = nn.ModuleList(
    -            [
    -                norm_f(
    -                    Conv2d(
    -                        1,
    -                        32,
    -                        (kernel_size, 1),
    -                        (stride, 1),
    -                        padding=(get_padding(kernel_size, 1), 0),
    -                    )
    -                ),
    -                norm_f(
    -                    Conv2d(
    -                        32,
    -                        128,
    -                        (kernel_size, 1),
    -                        (stride, 1),
    -                        padding=(get_padding(kernel_size, 1), 0),
    -                    )
    -                ),
    -                norm_f(
    -                    Conv2d(
    -                        128,
    -                        512,
    -                        (kernel_size, 1),
    -                        (stride, 1),
    -                        padding=(get_padding(kernel_size, 1), 0),
    -                    )
    -                ),
    -                norm_f(
    -                    Conv2d(
    -                        512,
    -                        1024,
    -                        (kernel_size, 1),
    -                        (stride, 1),
    -                        padding=(get_padding(kernel_size, 1), 0),
    -                    )
    -                ),
    -                norm_f(
    -                    Conv2d(
    -                        1024,
    -                        1024,
    -                        (kernel_size, 1),
    -                        1,
    -                        padding=(get_padding(kernel_size, 1), 0),
    -                    )
    -                ),
    -            ]
    -        )
    -        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
    -
    -    def forward(self, x):
    -        fmap = []
    -
    -        # 1d to 2d
    -        b, c, t = x.shape
    -        if t % self.period != 0:  # pad first
    -            n_pad = self.period - (t % self.period)
    -            x = F.pad(x, (0, n_pad), "reflect")
    -            t = t + n_pad
    -        x = x.view(b, c, t // self.period, self.period)
    -
    -        for l in self.convs:
    -            x = l(x)
    -            x = F.leaky_relu(x, modules.LRELU_SLOPE)
    -            fmap.append(x)
    -        x = self.conv_post(x)
    -        fmap.append(x)
    -        x = torch.flatten(x, 1, -1)
    -
    -        return x, fmap
    diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py
    deleted file mode 100644
    index c9a035f15cfad12ddbbfa87ed0d579c1cde0c4ce..0000000000000000000000000000000000000000
    --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -_base_ = './ga_faster_r50_fpn_1x_coco.py'
    -model = dict(
    -    pretrained='open-mmlab://resnext101_32x4d',
    -    backbone=dict(
    -        type='ResNeXt',
    -        depth=101,
    -        groups=32,
    -        base_width=4,
    -        num_stages=4,
    -        out_indices=(0, 1, 2, 3),
    -        frozen_stages=1,
    -        norm_cfg=dict(type='BN', requires_grad=True),
    -        style='pytorch'))
    diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py
    deleted file mode 100644
    index 0048965d5b4d2257eed860f9bd69256795b44fa6..0000000000000000000000000000000000000000
    --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
    -model = dict(
    -    pretrained='open-mmlab://detectron2/resnet101_caffe',
    -    backbone=dict(depth=101))
    diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/clap_consistency.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/clap_consistency.py
    deleted file mode 100644
    index d2a6c61ae177533ca2fb17e25bc77d2acbbe3791..0000000000000000000000000000000000000000
    --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/clap_consistency.py
    +++ /dev/null
    @@ -1,84 +0,0 @@
    -# Copyright (c) Meta Platforms, Inc. and affiliates.
    -# All rights reserved.
    -#
    -# This source code is licensed under the license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -from pathlib import Path
    -import typing as tp
    -
    -import torch
    -import torchmetrics
    -from transformers import RobertaTokenizer  # type: ignore
    -
    -from ..data.audio_utils import convert_audio
    -from ..environment import AudioCraftEnvironment
    -from ..utils.utils import load_clap_state_dict
    -
    -try:
    -    import laion_clap  # type: ignore
    -except ImportError:
    -    laion_clap = None
    -
    -
    -class TextConsistencyMetric(torchmetrics.Metric):
    -    """Text consistency metric measuring consistency between audio and text pairs."""
    -
    -    def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
    -        raise NotImplementedError("implement how to update the metric from the audio and text pairs.")
    -
    -    def compute(self):
    -        raise NotImplementedError("implement how to compute the final metric score.")
    -
    -
    -class CLAPTextConsistencyMetric(TextConsistencyMetric):
    -    """Text consistency metric relying on Contrastive Language-Audio Pretraining (CLAP).
    -
    -    This metric is similar to the MuLan Cycle Consistency from MusicLM (https://arxiv.org/pdf/2301.11325.pdf)
    -    or the CLAP score used in Make-An-Audio (https://arxiv.org/pdf/2301.12661v1.pdf).
    -
    -    As a joint audio-text embedding model, a pretrained CLAP model can be used to quantify the
    -    similarity between audio-text pairs. We compute the CLAP embeddings from the text descriptions as
    -    well as the generated audio based on them, and define the MCC metric as the average cosine similarity
    -    between these embeddings.
    -
    -    Model implementation & pre-trained checkpoints: https://github.com/LAION-AI/CLAP
    -    """
    -    def __init__(self, model_path: tp.Union[str, Path], model_arch: str = 'HTSAT-tiny', enable_fusion: bool = False):
    -        super().__init__()
    -        if laion_clap is None:
    -            raise ImportError("Please install CLAP to compute text consistency: 'pip install laion_clap'")
    -        self.add_state("cosine_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
    -        self.add_state("weight", default=torch.tensor(0.), dist_reduce_fx="sum")
    -        self._initialize_model(model_path, model_arch, enable_fusion)
    -
    -    def _initialize_model(self, model_path: tp.Union[str, Path], model_arch: str, enable_fusion: bool):
    -        model_path = AudioCraftEnvironment.resolve_reference_path(model_path)
    -        self.tokenize = RobertaTokenizer.from_pretrained('roberta-base')
    -        self.model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch)
    -        self.model_sample_rate = 48_000
    -        load_clap_state_dict(self.model, model_path)
    -        self.model.eval()
    -
    -    def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict:
    -        # we use the default params from CLAP module here as well
    -        return self.tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt")
    -
    -    def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
    -        """Compute cosine similarity between audio and text pairs and accumulate scores over the dataset."""
    -        assert audio.size(0) == len(text), "Number of audio and text samples should match"
    -        assert torch.all(sample_rates == sample_rates[0].item()), "All items in batch should have the same sample rate"
    -        sample_rate = int(sample_rates[0].item())
    -        # convert audio batch to 48kHz monophonic audio with no channel dimension: [B, C, T] -> [B, T]
    -        audio = convert_audio(audio, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1).mean(dim=1)
    -        audio_embeddings = self.model.get_audio_embedding_from_data(audio, use_tensor=True)
    -        text_embeddings = self.model.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True)
    -        # cosine similarity between the text and the audio embedding
    -        cosine_sim = torch.nn.functional.cosine_similarity(audio_embeddings, text_embeddings, dim=1, eps=1e-8)
    -        self.cosine_sum += cosine_sim.sum(dim=0)
    -        self.weight += torch.tensor(cosine_sim.size(0))
    -
    -    def compute(self):
    -        """Computes the average cosine similarty across all audio/text pairs."""
    -        assert self.weight.item() > 0, "Unable to compute with total number of comparisons <= 0"  # type: ignore
    -        return (self.cosine_sum / self.weight).item()  # type: ignore
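-
-
-# Minimal usage sketch under assumed inputs (the path and shapes below are
-# hypothetical, not shipped with this module): update() accumulates the
-# per-pair cosine similarities and compute() averages them.
-#
-#   metric = CLAPTextConsistencyMetric(model_path='/path/to/clap_ckpt.pt')
-#   audio = torch.randn(4, 1, 48_000)                  # [B, C, T]
-#   sizes = torch.full((4,), 48_000)
-#   sample_rates = torch.full((4,), 48_000)
-#   metric.update(audio, ['a dog barks'] * 4, sizes, sample_rates)
-#   score = metric.compute()                           # mean cosine similarity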
    diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/ema.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/ema.py
    deleted file mode 100644
    index 4337eaff066a8ca124dca3e3e63ee36e417c055c..0000000000000000000000000000000000000000
    --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/ema.py
    +++ /dev/null
    @@ -1,85 +0,0 @@
    -# Copyright (c) Meta Platforms, Inc. and affiliates.
    -# All rights reserved.
    -#
    -# This source code is licensed under the license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -# ModelEMA implementation is taken from
    -# https://github.com/facebookresearch/demucs
    -
    -from collections import defaultdict
    -import typing as tp
    -
    -import torch
    -import torch.nn as nn
    -
    -
    -def _get_all_non_persistent_buffers_set(module: nn.Module, root: str = "") -> set:
    -    names: set = set()
    -    for (name, sub_module) in module.named_modules():
    -        if name == '':
    -            buffer_names = module._non_persistent_buffers_set
    -            buffer_names = {f"{root}.{buff_name}" if len(root) > 0 else buff_name
    -                            for buff_name in buffer_names}
    -            names.update(buffer_names)
    -        else:
    -            sub_name = f"{root}.{name}" if len(root) > 0 else name
    -            sub_buffer_names = _get_all_non_persistent_buffers_set(sub_module, sub_name)
    -            names.update(sub_buffer_names)
    -    return names
    -
    -
    -def _get_named_tensors(module: nn.Module):
    -    non_persistent_buffers_set = _get_all_non_persistent_buffers_set(module)
    -    named_buffers = [(name, buffer) for (name, buffer) in module.named_buffers()
    -                     if name not in non_persistent_buffers_set]
    -    named_parameters = list(module.named_parameters())
    -    return named_parameters + named_buffers
    -
    -
    -class ModuleDictEMA:
    -    """Exponential Moving Average over a nn.ModuleDict.
    -
    -    You can switch to the EMA weights temporarily.
    -    """
    -    def __init__(self, module_dict: nn.ModuleDict, decay: float = 0.999,
    -                 unbias: bool = True, device: tp.Union[torch.device, str] = 'cpu'):
    -        self.decay = decay
    -        self.module_dict = module_dict
    -        self.state: dict = defaultdict(dict)
    -        self.count = 0
    -        self.device = device
    -        self.unbias = unbias
    -        self._init()
    -
    -    def _init(self):
    -        for module_name, module in self.module_dict.items():
    -            for key, val in _get_named_tensors(module):
    -                if not val.is_floating_point():
    -                    continue
    -                device = self.device or val.device
    -                if key not in self.state[module_name]:
    -                    self.state[module_name][key] = val.detach().to(device, copy=True)
    -
    -    def step(self):
    -        if self.unbias:
    -            self.count = self.count * self.decay + 1
    -            w = 1 / self.count
    -        else:
    -            w = 1 - self.decay
    -        for module_name, module in self.module_dict.items():
    -            for key, val in _get_named_tensors(module):
    -                if not val.is_floating_point():
    -                    continue
    -                device = self.device or val.device
    -                self.state[module_name][key].mul_(1 - w)
    -                self.state[module_name][key].add_(val.detach().to(device), alpha=w)
    -
    -    def state_dict(self):
    -        return {'state': self.state, 'count': self.count}
    -
    -    def load_state_dict(self, state):
    -        self.count = state['count']
    -        for module_name, module in state['state'].items():
    -            for key, val in module.items():
    -                self.state[module_name][key].copy_(val)
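-
-
-# Sketch of the unbiased schedule used in step() above, reduced to a scalar
-# (purely illustrative; `ema_scalar` is not part of this module). With
-# count = count * decay + 1, the weight w = 1 / count starts at 1, so the
-# first observation is copied exactly, and w decays towards 1 - decay.
-def ema_scalar(values, decay=0.999):
-    state, count = 0.0, 0.0
-    for v in values:
-        count = count * decay + 1
-        w = 1 / count
-        state = state * (1 - w) + v * w
-    return state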
    diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_iflytek.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_iflytek.sh
    deleted file mode 100644
    index 7afd7b24d27ddd1a6834935222a100351111d570..0000000000000000000000000000000000000000
    --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/fs_zen2_large_iflytek.sh
    +++ /dev/null
    @@ -1,93 +0,0 @@
    -#!/bin/bash
    -#SBATCH --job-name=zen2_large_iflytek # create a short name for your job
    -#SBATCH --nodes=1 # node count
    -#SBATCH --ntasks=1 # total number of tasks across all nodes
    -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks)
    -#SBATCH --gres=gpu:1 # number of gpus per node
-#SBATCH --mail-type=ALL # send email when the job begins, ends, or fails
    -#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id)
    -
    -
    -export CUDA_VISIBLE_DEVICES='5'
    -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions
    -
    -MODEL_NAME=zen2_large
    -
    -TASK=iflytek
    -
    -ZERO_STAGE=1
    -STRATEGY=deepspeed_stage_${ZERO_STAGE}
    -
    -ROOT_DIR=/cognitive_comp/ganruyi/experiments/classification_finetune/${MODEL_NAME}_${TASK}
-if [ ! -d ${ROOT_DIR} ]; then
-  mkdir -p ${ROOT_DIR}
-  echo "${ROOT_DIR} created"
-else
-  echo "${ROOT_DIR} already exists"
-fi
    -
    -DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/
    -PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/zh_zen_large_2.0
    -
    -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/
    -OUTPUT_PATH=${ROOT_DIR}/predict.json
    -
    -DATA_ARGS="\
    -        --data_dir $DATA_DIR \
    -        --train_data train.json \
    -        --valid_data dev.json \
    -        --test_data test.json \
    -        --train_batchsize 32 \
    -        --valid_batchsize 16 \
    -        --max_seq_length 128 \
    -        --texta_name sentence \
    -        --label_name label \
    -        --id_name id \
    -        --task_name iflytek \
    -        "
    -
    -MODEL_ARGS="\
    -        --learning_rate 2e-5 \
    -        --weight_decay 0.1 \
    -        --warmup_ratio 0.01 \
    -        --num_labels 119 \
    -        "
    -
    -MODEL_CHECKPOINT_ARGS="\
    -        --monitor val_acc \
    -        --save_top_k 3 \
    -        --mode max \
    -        --every_n_train_steps 100 \
    -        --save_weights_only True \
    -        --dirpath $CHECKPOINT_PATH \
    -        --filename model-{epoch:02d}-{val_acc:.4f} \
    -        "
    -
    -TRAINER_ARGS="\
    -        --max_epochs 7 \
    -        --gpus 1 \
    -        --check_val_every_n_epoch 1 \
    -        --val_check_interval 100 \
    -        --default_root_dir $ROOT_DIR \
    -        "
    -
    -
    -options=" \
    -        --pretrained_model_path $PRETRAINED_MODEL_PATH \
    -        --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \
    -        --do_lower_case \
    -        --output_save_path $OUTPUT_PATH \
    -        $DATA_ARGS \
    -        $MODEL_ARGS \
    -        $MODEL_CHECKPOINT_ARGS \
    -        $TRAINER_ARGS \
    -"
    -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py
    -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
    -
    -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif
    -# python3 $SCRIPT_PATH $options
    -# source activate base
    -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
    -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options
    -
    diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/discriminative_reranking_nmt/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/discriminative_reranking_nmt/__init__.py
    deleted file mode 100644
    index 0278f6a27340c7ff7e207d09348483d1b0d3a100..0000000000000000000000000000000000000000
    --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/discriminative_reranking_nmt/__init__.py
    +++ /dev/null
    @@ -1 +0,0 @@
    -from . import criterions, models, tasks  # noqa
    diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_lotus.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_lotus.sh
    deleted file mode 100644
    index c08c701314a8e575637deff78381ab02c2ef6728..0000000000000000000000000000000000000000
    --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/multilingual/data_scripts/download_lotus.sh
    +++ /dev/null
    @@ -1,46 +0,0 @@
    -#!/bin/bash
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -# All rights reserved.
    -#
    -# This source code is licensed under the license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -
    -if [ -z $WORKDIR_ROOT ] ;
    -then
    -        echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..."
    -        exit
    -fi
    -
    -
    -SRCDIR=$WORKDIR_ROOT/indic_languages_corpus
    -DESTDIR=${WORKDIR_ROOT}/ML50/raw/
    -mkdir -p $SRCDIR
    -mkdir -p $DESTDIR
    -
    -cd $SRCDIR
    -wget http://lotus.kuee.kyoto-u.ac.jp/WAT/indic-multilingual/indic_languages_corpus.tar.gz
    -tar -xvzf indic_languages_corpus.tar.gz
    -
    -SRC_EXTRACT_DIR=$SRCDIR/indic_languages_corpus/bilingual
    -
    -cp $SRC_EXTRACT_DIR/ml-en/train.ml $DESTDIR/train.ml_IN-en_XX.ml_IN
    -cp $SRC_EXTRACT_DIR/ml-en/train.en $DESTDIR/train.ml_IN-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/ml-en/dev.ml $DESTDIR/valid.ml_IN-en_XX.ml_IN
    -cp $SRC_EXTRACT_DIR/ml-en/dev.en $DESTDIR/valid.ml_IN-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/ml-en/test.ml $DESTDIR/test.ml_IN-en_XX.ml_IN
    -cp $SRC_EXTRACT_DIR/ml-en/test.en $DESTDIR/test.ml_IN-en_XX.en_XX
    -
    -cp $SRC_EXTRACT_DIR/ur-en/train.ur $DESTDIR/train.ur_PK-en_XX.ur_PK
    -cp $SRC_EXTRACT_DIR/ur-en/train.en $DESTDIR/train.ur_PK-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/ur-en/dev.ur $DESTDIR/valid.ur_PK-en_XX.ur_PK
    -cp $SRC_EXTRACT_DIR/ur-en/dev.en $DESTDIR/valid.ur_PK-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/ur-en/test.ur $DESTDIR/test.ur_PK-en_XX.ur_PK
    -cp $SRC_EXTRACT_DIR/ur-en/test.en $DESTDIR/test.ur_PK-en_XX.en_XX
    -
    -cp $SRC_EXTRACT_DIR/te-en/train.te $DESTDIR/train.te_IN-en_XX.te_IN
    -cp $SRC_EXTRACT_DIR/te-en/train.en $DESTDIR/train.te_IN-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/te-en/dev.te $DESTDIR/valid.te_IN-en_XX.te_IN
    -cp $SRC_EXTRACT_DIR/te-en/dev.en $DESTDIR/valid.te_IN-en_XX.en_XX
    -cp $SRC_EXTRACT_DIR/te-en/test.te $DESTDIR/test.te_IN-en_XX.te_IN
    -cp $SRC_EXTRACT_DIR/te-en/test.en $DESTDIR/test.te_IN-en_XX.en_XX
    diff --git a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage1_el_db.sh b/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage1_el_db.sh
    deleted file mode 100644
    index 3d32254e59d0da435675f45845a9e1b53947b024..0000000000000000000000000000000000000000
    --- a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/train_caption_stage1_el_db.sh
    +++ /dev/null
    @@ -1,111 +0,0 @@
-#!/usr/bin/env bash
    -
    -# The port for communication. Note that if you want to run multiple tasks on the same machine,
    -# you need to specify different port numbers.
    -export MASTER_PORT=1051
    -
    -log_dir=./stage1_logs
    -save_dir=./stage1_checkpoints
    -mkdir -p $log_dir $save_dir
    -
    -bpe_dir=../../utils/BPE
    -user_dir=../../ofa_module
    -
    -data_dir=../../dataset/caption_data
    -data=${data_dir}/caption_stage1_train.tsv,${data_dir}/caption_val.tsv
    -restore_file=../../checkpoints/ofa_large.pt
    -selected_cols=0,4,2
    -
    -task=caption
    -arch=ofa_large
    -criterion=adjust_label_smoothed_encouraging_loss # for el
    -label_smoothing=0.1
    -lr=1e-5
    -max_epoch=5
    -warmup_ratio=0.06
    -batch_size=8
    -update_freq=4
    -resnet_drop_path_rate=0.0
    -encoder_drop_path_rate=0.1
    -decoder_drop_path_rate=0.1
    -dropout=0.1
    -attention_dropout=0.0
    -max_src_length=80
    -max_tgt_length=20
    -num_bins=1000
    -patch_image_size=480
    -eval_cider_cached=${data_dir}/cider_cached_tokens/coco-valid-words.p
    -drop_worst_ratio=0.05 # modified from 0.2 for el
    -drop_best_ratio=0.05
    -drop_best_after=2500
    -log_end=0.75  # for el
    -for max_epoch in {2,}; do
    -  echo "max_epoch "${max_epoch}
    -  for warmup_ratio in {0.06,}; do
    -    echo "warmup_ratio "${warmup_ratio}
    -    for drop_worst_after in {2500,}; do
    -      echo "drop_worst_after "${drop_worst_after}
    -
    -      log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_dwdb"${drop_worst_after}_el${log_end}_".log"
    -      save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_dwdb"${drop_worst_after}_el${log_end}_
    -      mkdir -p $save_path
    -
    -      CUDA_VISIBLE_DEVICES=0,1,2,3 python3 -m torch.distributed.launch --nproc_per_node=4 --master_port=${MASTER_PORT} ../../train.py \
    -          $data \
    -          --selected-cols=${selected_cols} \
    -          --bpe-dir=${bpe_dir} \
    -          --user-dir=${user_dir} \
    -          --restore-file=${restore_file} \
    -          --reset-optimizer --reset-dataloader --reset-meters \
    -          --save-dir=${save_path} \
    -          --task=${task} \
    -          --arch=${arch} \
    -          --criterion=${criterion} \
    -          --label-smoothing=${label_smoothing} \
    -          --batch-size=${batch_size} \
    -          --update-freq=${update_freq} \
    -          --encoder-normalize-before \
    -          --decoder-normalize-before \
    -          --share-decoder-input-output-embed \
    -          --share-all-embeddings \
    -          --layernorm-embedding \
    -          --patch-layernorm-embedding \
    -          --code-layernorm-embedding \
    -          --resnet-drop-path-rate=${resnet_drop_path_rate} \
    -          --encoder-drop-path-rate=${encoder_drop_path_rate} \
    -          --decoder-drop-path-rate=${decoder_drop_path_rate} \
    -          --dropout=${dropout} \
    -          --attention-dropout=${attention_dropout} \
    -          --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \
    -          --lr-scheduler=polynomial_decay --lr=${lr} \
    -          --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \
    -          --log-format=simple --log-interval=10 \
    -          --fixed-validation-seed=7 \
    -          --no-epoch-checkpoints --keep-best-checkpoints=1 \
    -          --save-interval=1 --validate-interval=1 \
    -          --save-interval-updates=500 --validate-interval-updates=500 \
    -          --eval-cider \
    -          --eval-cider-cached-tokens=${eval_cider_cached} \
    -          --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \
    -          --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \
    -          --max-src-length=${max_src_length} \
    -          --max-tgt-length=${max_tgt_length} \
    -          --find-unused-parameters \
    -          --freeze-encoder-embedding \
    -          --freeze-decoder-embedding \
    -          --add-type-embedding \
    -          --scale-attn \
    -          --scale-fc \
    -          --scale-heads \
    -          --disable-entangle \
    -          --num-bins=${num_bins} \
    -          --patch-image-size=${patch_image_size} \
    -          --drop-worst-ratio=${drop_worst_ratio} \
    -          --drop-worst-after=${drop_worst_after} \
    -          --log-end ${log_end} --drop-best-ratio ${drop_best_ratio} --drop-best-after ${drop_best_after} \
    -          --fp16 \
    -          --fp16-scale-window=512 \
    -          --num-workers=0 > ${log_file} 2>&1
    -    done
    -  done
    -done
    diff --git a/spaces/Harsh12/Rossmann_Sales_Prediction/app.py b/spaces/Harsh12/Rossmann_Sales_Prediction/app.py
    deleted file mode 100644
    index 23b2b7dcb28f245870b81c8deab6949d1d46276e..0000000000000000000000000000000000000000
    --- a/spaces/Harsh12/Rossmann_Sales_Prediction/app.py
    +++ /dev/null
    @@ -1,137 +0,0 @@
    -import streamlit as st
    -import joblib 
    -import pickle
    -import pandas as pd
    -
    -# Load the model from disk
    -model = joblib.load('rf.sav')
    -# Load the model from disk
    -with open('scaler.pkl', 'rb') as file:
    -    scaler = pickle.load(file)
    -
    -st.title('Rossmann Sales Prediction App')
    -
-st.write('This app takes in several input parameters and predicts the sales for a particular day at one of the 1,115 Rossmann stores.')
    -
    -
    -store = int(st.number_input('Store Number (select between 1-1115)', step=1, min_value=1, max_value=1115))
    -# st.write('Store Number is', store)
    -
    -week_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    -week_days_mapping = {'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4, 'Friday':5, 'Saturday':6, 'Sunday':7}
    -
    -week_days_input = st.selectbox('Select the day of the week', week_days)
    -
    -# st.write(week_days_input)
    -
    -col1, col2 = st.columns(2)
    -
    -
    -with col1:
    -    promo = ['yes', 'no']
    -    promo_map = {'yes':1, 'no':0}
    -
    -    promo_or_not = st.selectbox('Promotion was opted or not?', promo)
    -
    -with col2:
    -    school_holiday = ['yes', 'no']
    -    school_map = {'yes':1, 'no':0}
    -
    -    school_holiday_or_not = st.selectbox('Is there a School Holiday?', school_holiday)
    -
    -
    -col3, col4 = st.columns(2)
    -
    -with col3:
    -    year = st.number_input('Enter the year:',step=1, min_value=1973, max_value=2025)
    -    
    -
    -with col4:
    -    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
    -
    -    month_map = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6,
    -              'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}
    -
    -
    -    select_month = st.selectbox('Select the month', months)
    -
    -
    -days = st.number_input('Enter the day number for which you want to predict:', step=1, min_value=1, max_value=31)
    -
    -
    -stores_type = ['a', 'b', 'c', 'd']
    -stores_type_input = st.selectbox('Select the type of store', stores_type)
    -
    -assortment_type = ['basic', 'extra', 'extended']
    -assortment_type_map = {'basic':'a', 'extra':'b', 'extended':'c'}
    -assortment_type_input = st.selectbox('Select the assortment type (variations in the product)', assortment_type)
    -
    -customers = st.number_input('Enter the expected number of customers', step=5, min_value=5, max_value=7500)
    -comp_distance = st.number_input('Enter the distance in meters to the nearest competitor store', step=1)
-comp_open_since_month = float(st.number_input('The month in which the nearest competitor store was opened (1-12)', step=1, min_value=1, max_value=12))
    -comp_open_since_year = float(st.number_input('The year in which the nearest competitor store was opened', step=1, min_value=1973, max_value=2025))
    -
    -
    -competition_open = (12* (year-comp_open_since_year)) + (month_map[select_month] - (comp_open_since_month))
    -
    -if stores_type_input == 'a':
    -    store_type_value_b = 0
    -    store_type_value_c = 0
    -    store_type_value_d = 0
    -
    -if stores_type_input == 'b':
    -    store_type_value_b = 1
    -    store_type_value_c = 0
    -    store_type_value_d = 0
    -
    -if stores_type_input == 'c':
    -    store_type_value_b = 0
    -    store_type_value_c = 1
    -    store_type_value_d = 0
    -
    -if stores_type_input == 'd':
    -    store_type_value_b = 0
    -    store_type_value_c = 0
    -    store_type_value_d = 1
    -
    -if assortment_type_input == 'basic':
    -    assortment_b = 0
    -    assortment_c = 0
    -
    -if assortment_type_input == 'extra':
    -    assortment_b = 1
    -    assortment_c = 0
    -
    -if assortment_type_input == 'extended':
    -    assortment_b = 0
    -    assortment_c = 1
    -
    -
    -
    -
    -
    -if st.button('Predict Sales'):
    -
    -    try:
    -        final_dict = {'Store':store, 'day_of_week':week_days_mapping[week_days_input], 
    -                    'promotion':promo_map[promo_or_not], 'school holiday':school_map[school_holiday_or_not],
    -                    'year':year, 'month':month_map[select_month], 'day':days, 'store b':store_type_value_b,
    -                    'store c':store_type_value_c, 'store d':store_type_value_d, 'assortment b':assortment_b,
    -                    'assortment c':assortment_c, 'customers':customers, 'Comp Dist':comp_distance,
    -                    'Comp_open': competition_open}
    -
    -
    -        final_df = pd.DataFrame([final_dict])
    -        final_df_scaled = scaler.transform(final_df)
    -        # st.write(final_df_scaled)
    -        # st.write(final_df)
    -
    -        sales = model.predict(final_df_scaled)
    -        st.write('The sales for this particular day of the store you selected is:', sales[0])
    -
    -    except Exception as e:
-        st.error(f'Something went wrong, please check your inputs: {e}')
    -
    -
    -
    -
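-# Two illustrative notes, not part of the original app:
-# * competition_open above counts the months since the competitor opened,
-#   i.e. 12 * (year - open_year) + (month - open_month).
-# * The if-chains for store type and assortment are equivalent to this more
-#   compact (hypothetical) one-hot construction:
-#
-#   store_flags = {f'store {t}': int(stores_type_input == t) for t in 'bcd'}
-#   assort_flags = {f'assortment {t}': int(assortment_type_map[assortment_type_input] == t)
-#                   for t in 'bc'}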
    diff --git a/spaces/HarshulNanda/HARM_ML_App_ludwig/dataGenerators/thumbnail_dataGenerators/readme.md b/spaces/HarshulNanda/HARM_ML_App_ludwig/dataGenerators/thumbnail_dataGenerators/readme.md
    deleted file mode 100644
    index 82ac350fe083cf88ffbf2130ad9991e46a8283dd..0000000000000000000000000000000000000000
    --- a/spaces/HarshulNanda/HARM_ML_App_ludwig/dataGenerators/thumbnail_dataGenerators/readme.md
    +++ /dev/null
    @@ -1 +0,0 @@
    -## Thumbnails data generators and model
    \ No newline at end of file
    diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/hifi/utils.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/hifi/utils.py
    deleted file mode 100644
    index 71e9b2c99e053e2d4239074a67d64b834898c348..0000000000000000000000000000000000000000
    --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/hifi/utils.py
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -import glob
    -import os
    -import matplotlib
    -import torch
    -from torch.nn.utils import weight_norm
    -
    -matplotlib.use("Agg")
    -import matplotlib.pylab as plt
    -
    -
    -def plot_spectrogram(spectrogram):
    -    fig, ax = plt.subplots(figsize=(10, 2))
    -    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
    -    plt.colorbar(im, ax=ax)
    -
    -    fig.canvas.draw()
    -    plt.close()
    -
    -    return fig
    -
    -
    -def init_weights(m, mean=0.0, std=0.01):
    -    classname = m.__class__.__name__
    -    if classname.find("Conv") != -1:
    -        m.weight.data.normal_(mean, std)
    -
    -
    -def apply_weight_norm(m):
    -    classname = m.__class__.__name__
    -    if classname.find("Conv") != -1:
    -        weight_norm(m)
    -
    -
    -def get_padding(kernel_size, dilation=1):
    -    return int((kernel_size * dilation - dilation) / 2)
    -
    -
    -def load_checkpoint(filepath, device):
    -    assert os.path.isfile(filepath)
    -    print("Loading '{}'".format(filepath))
    -    checkpoint_dict = torch.load(filepath, map_location=device)
    -    print("Complete.")
    -    return checkpoint_dict
    -
    -
    -def save_checkpoint(filepath, obj):
    -    print("Saving checkpoint to {}".format(filepath))
    -    torch.save(obj, filepath)
    -    print("Complete.")
    -
    -
    -def scan_checkpoint(cp_dir, prefix):
    -    pattern = os.path.join(cp_dir, prefix + "????????")
    -    cp_list = glob.glob(pattern)
    -    if len(cp_list) == 0:
    -        return None
    -    return sorted(cp_list)[-1]
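-
-
-# Quick illustrative check (not part of the original file; `_check_same_padding`
-# is hypothetical): with stride 1, padding = dilation * (kernel_size - 1) // 2
-# preserves the input length for odd kernel sizes, which is exactly what
-# get_padding() computes.
-def _check_same_padding(kernel_size=5, dilation=2, length=100):
-    conv = torch.nn.Conv1d(1, 1, kernel_size, dilation=dilation,
-                           padding=get_padding(kernel_size, dilation))
-    assert conv(torch.randn(1, 1, length)).shape[-1] == length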
    diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/scripts/inference/infer.sh b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/scripts/inference/infer.sh
    deleted file mode 100644
    index dec70e1f30fb80f6957f4f3382b4c0963827cf43..0000000000000000000000000000000000000000
    --- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/scripts/inference/infer.sh
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -gender='male'
    -glowdir='../../checkpoints/glow/'$gender'/'
    -hifidir='../../checkpoints/hifi/'$gender'/'
    -device='cpu'
    -text='testing this one'
    -
    -
    -timestamp=$(date +%s)
    -wav='../../results/'$gender'/'
    -wav_file=$wav/$timestamp'.wav'
    -
    -
    -mkdir -p $wav
    -python ../../utils/inference/tts.py -a $glowdir -v $hifidir -d $device -t "$text" -w $wav_file 
    -echo "File saved at: "$wav_file
    diff --git a/spaces/HugoDzz/super-godot-galaxy/tailwind.config.js b/spaces/HugoDzz/super-godot-galaxy/tailwind.config.js
    deleted file mode 100644
    index 186f731d89a9ad8fca203cd5150f41eed9aca2e1..0000000000000000000000000000000000000000
    --- a/spaces/HugoDzz/super-godot-galaxy/tailwind.config.js
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -/** @type {import('tailwindcss').Config} */
    -export default {
    -	content: ["./src/**/*.{html,js,svelte,ts}"],
    -	theme: {
    -		extend: {
    -			fontFamily: {
    -				Hellovetica: ["Hellovetica"]
    -			},
    -		},
    -	},
    -	plugins: [],
    -};
    diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/num_samples_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/num_samples_dataset.py
    deleted file mode 100644
    index 99a17495c701d8a05e0268f98bf453905e11d078..0000000000000000000000000000000000000000
    --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/num_samples_dataset.py
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -from . import FairseqDataset
    -
    -
    -class NumSamplesDataset(FairseqDataset):
    -    def __getitem__(self, index):
    -        return 1
    -
    -    def __len__(self):
    -        return 0
    -
    -    def collater(self, samples):
    -        return sum(samples)
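-
-
-# Illustrative note (not in the original file): every item is the constant 1
-# and collater() sums them, so collating a batch yields its size, e.g.
-# NumSamplesDataset().collater([1, 1, 1]) == 3. This lets composite datasets
-# expose "number of samples" as just another collated field.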
    diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/roberta/enc_dec.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/roberta/enc_dec.py
    deleted file mode 100644
    index e538dee0aa5984b1a3d02ce81117d2046c030593..0000000000000000000000000000000000000000
    --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/roberta/enc_dec.py
    +++ /dev/null
    @@ -1,192 +0,0 @@
    -import argparse
    -import logging
    -
    -import torch.nn as nn
    -import fairseq.checkpoint_utils
    -from fairseq.models import (
    -    FairseqEncoderDecoderModel,
    -    register_model,
    -    register_model_architecture,
    -)
    -from fairseq.models.transformer import TransformerDecoder
    -from fairseq.models.roberta import model as roberta
    -
    -logger = logging.getLogger(__name__)
    -
    -
    -@register_model("roberta_enc_dec")
    -class RobertaEncDecModel(FairseqEncoderDecoderModel):
    -    @staticmethod
    -    def add_args(parser):
    -        parser.add_argument(
    -            "--pretrained-mlm-checkpoint",
    -            default=None,
    -            type=str,
    -            metavar="PRETRAINED",
    -            help="path to pretrained mlm checkpoint",
    -        )
    -        parser.add_argument(
    -            "--pretrained-decoder", action="store_true", help="reload decoder"
    -        )
    -        parser.add_argument(
    -            "--hack-layernorm-embedding",
    -            action="store_true",
    -            help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False",
    -        )
    -        parser.add_argument(
    -            "--share-decoder-input-output-embed",
    -            action="store_true",
    -            help="share decoder input and output embeddings",
    -        )
    -        parser.add_argument(
    -            "--share-all-embeddings",
    -            action="store_true",
    -            help="share encoder, decoder and output embeddings"
    -            " (requires shared dictionary and embed dim)",
    -        )
    -
    -    @classmethod
    -    def build_model(cls, args, task):
    -        """Build a new model instance."""
    -
    -        # make sure all arguments are present
    -        base_enc_dec_architecture(args)
    -        if args.pretrained_mlm_checkpoint:
    -            arg_overrides = None
    -            if args.hack_layernorm_embedding:
    -                arg_overrides = {"layernorm_embedding": False}
    -            loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task(
    -                [args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides
    -            )
    -            ([roberta_enc], _cfg, _task) = loaded
    -        else:
-            # Do we need to edit untie_weights here?
    -            share_in_out = (
    -                args.share_decoder_input_output_embed or args.share_all_embeddings
    -            )
    -            args.untie_weights_roberta = not share_in_out
    -            if args.hack_layernorm_embedding:
    -                args.layernorm_embedding = False
    -                args.encoder_normalize_before = False
    -            roberta_enc = roberta.RobertaModel.build_model(args, task)
    -
    -        return cls.from_roberta(roberta_enc, args, task.source_dictionary)
    -
    -    @staticmethod
    -    def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary):
    -        encoder = roberta_enc.encoder.sentence_encoder
    -        vocab_size, embed_dim = encoder.embed_tokens.weight.shape
    -
    -        if args.share_all_embeddings:
    -            lm_head = roberta_enc.encoder.lm_head
    -            assert encoder.embed_tokens.weight is lm_head.weight, (
    -                "Can't use --share-all-embeddings with a model "
    -                "that was pretraiend with --untie-weights-roberta_enc"
    -            )
    -        else:
    -            lm_head = roberta.RobertaLMHead(
    -                embed_dim, vocab_size, roberta_enc.args.activation_fn
    -            )
    -
    -        dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad())
    -        if args.share_all_embeddings or args.share_decoder_input_output_embed:
-            # Note: I wasn't able to use the Embedding _weight parameter to achieve this sharing.
    -            dec_embs.weight = lm_head.weight
    -
    -        decoder = TransformerDecoder(
    -            RobertaEncDecModel.read_args_from_roberta(roberta_enc.args),
    -            dictionary,
    -            dec_embs,
    -            no_encoder_attn=False,
    -            output_projection=lm_head,
    -        )
    -        if getattr(args, "pretrained_decoder", False):
    -            decoder_dict = encoder.state_dict()
    -
    -            # TODO: hide setting "encoder_attn" layers behind a flag.
    -            for k, w in list(decoder_dict.items()):
    -                if ".self_attn" in k:
    -                    k_enc_attn = k.replace(".self_attn", ".encoder_attn")
    -                    decoder_dict[k_enc_attn] = w.detach().clone()
    -
    -            for k, w in lm_head.state_dict().items():
    -                decoder_dict["output_projection." + k] = w
    -
    -            missing_keys, unexpected_keys = decoder.load_state_dict(
    -                decoder_dict, strict=False
    -            )
    -            # missing_keys = [m for m in missing_keys if ".encoder_attn" not in m]
    -            assert not missing_keys and not unexpected_keys, (
    -                "Failed to load state dict. "
    -                f"Missing keys: {missing_keys}. "
    -                f"Unexpected keys: {unexpected_keys}."
    -            )
    -
    -        if args.share_all_embeddings:
    -            assert decoder.output_projection.weight is decoder.embed_tokens.weight
    -            assert encoder.embed_tokens.weight is decoder.embed_tokens.weight
    -        elif args.share_decoder_input_output_embed:
    -            assert decoder.output_projection.weight is decoder.embed_tokens.weight
    -            assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
    -        else:
    -            assert decoder.output_projection.weight is not decoder.embed_tokens.weight
    -            assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
    -
    -        return RobertaEncDecModel(encoder, decoder)
    -
    -    @staticmethod
    -    def read_args_from_roberta(roberta_args: argparse.Namespace):
-        # TODO: this would become easier if encoder/decoder were using a similar
    -        # TransformerConfig object
    -        args = argparse.Namespace(**vars(roberta_args))
    -        attr_map = [
    -            ("encoder_attention_heads", "decoder_attention_heads"),
    -            ("encoder_embed_dim", "decoder_embed_dim"),
    -            ("encoder_embed_dim", "decoder_output_dim"),
    -            ("encoder_normalize_before", "decoder_normalize_before"),
    -            ("encoder_layers_to_keep", "decoder_layers_to_keep"),
    -            ("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"),
    -            ("encoder_layerdrop", "decoder_layerdrop"),
    -            ("encoder_layers", "decoder_layers"),
    -            ("encoder_learned_pos", "decoder_learned_pos"),
-            # should this be set from here?
    -            ("max_positions", "max_target_positions"),
    -        ]
    -        for k1, k2 in attr_map:
    -            setattr(args, k2, getattr(roberta_args, k1))
    -
    -        args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    -        args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    -        args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta
    -        return args
    -
    -    def upgrade_state_dict_named(self, state_dict, name):
    -        prefix = name + "." if name != "" else ""
    -        super().upgrade_state_dict_named(state_dict, name)
    -        old_keys = list(state_dict.keys())
    -
    -        # rename decoder -> encoder before upgrading children modules
    -        for k in old_keys:
    -            if k.startswith(prefix + "encoder.lm_head"):
    -                state_dict.pop(k)
    -                continue
    -            new_k = k
    -            new_k = new_k.replace(".sentence_encoder.", ".")
    -            new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.")
    -            if k == new_k:
    -                continue
    -            # print(k, "->", new_k)
    -            state_dict[new_k] = state_dict.pop(k)
    -
    -
    -@register_model_architecture("roberta_enc_dec", "roberta_enc_dec")
    -def base_enc_dec_architecture(args):
    -    args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False)
    -    args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None)
    -    args.pretrained_decoder = getattr(args, "pretrained_decoder", None)
    -    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    -    args.share_decoder_input_output_embed = getattr(
    -        args, "share_decoder_input_output_embed", False
    -    )
    -
    -    roberta.base_architecture(args)
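-
-
-# Minimal sketch of the checkpoint key migration performed by
-# upgrade_state_dict_named() above for the name == "" case (illustrative
-# only; `_demo_rename` is not part of the model):
-def _demo_rename(state_dict: dict) -> dict:
-    out = {}
-    for k, v in state_dict.items():
-        if k.startswith("encoder.lm_head"):
-            continue  # dropped: the decoder's output_projection owns the head
-        k = k.replace(".sentence_encoder.", ".")
-        k = k.replace("decoder.lm_head.", "decoder.output_projection.")
-        out[k] = v
-    return out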
    diff --git a/spaces/IDEA-CCNL/Erlangshen-UniMC-Zero-Shot/README.md b/spaces/IDEA-CCNL/Erlangshen-UniMC-Zero-Shot/README.md
    deleted file mode 100644
    index fbe757c131a190affd39ad841092770f05e00fb5..0000000000000000000000000000000000000000
    --- a/spaces/IDEA-CCNL/Erlangshen-UniMC-Zero-Shot/README.md
    +++ /dev/null
    @@ -1,13 +0,0 @@
    ----
    -title: Erlangshen-UniMC-Zero-Shot
    -emoji: 🐢
    -colorFrom: green
    -colorTo: gray
    -sdk: streamlit
    -sdk_version: 1.10.0
    -app_file: app.py
    -pinned: false
    -license: apache-2.0
    ----
    -
    -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
    diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/image_degradation/bsrgan.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/image_degradation/bsrgan.py
    deleted file mode 100644
    index 32ef56169978e550090261cddbcf5eb611a6173b..0000000000000000000000000000000000000000
    --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/image_degradation/bsrgan.py
    +++ /dev/null
    @@ -1,730 +0,0 @@
    -# -*- coding: utf-8 -*-
    -"""
    -# --------------------------------------------
    -# Super-Resolution
    -# --------------------------------------------
    -#
    -# Kai Zhang (cskaizhang@gmail.com)
    -# https://github.com/cszn
    -# From 2019/03--2021/08
    -# --------------------------------------------
    -"""
    -
    -import numpy as np
    -import cv2
    -import torch
    -
    -from functools import partial
    -import random
    -from scipy import ndimage
    -import scipy
    -import scipy.stats as ss
    -from scipy.interpolate import interp2d
    -from scipy.linalg import orth
    -import albumentations
    -
    -import ldm.modules.image_degradation.utils_image as util
    -
    -
    -def modcrop_np(img, sf):
-    '''
-    Args:
-        img: numpy image, HxW or HxWxC
-        sf: scale factor
-    Return:
-        cropped image
-    '''
-    h, w = img.shape[:2]
-    im = np.copy(img)
-    return im[:h - h % sf, :w - w % sf, ...]
    -
    -
    -"""
    -# --------------------------------------------
    -# anisotropic Gaussian kernels
    -# --------------------------------------------
    -"""
    -
    -
    -def analytic_kernel(k):
    -    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    -    k_size = k.shape[0]
    -    # Calculate the big kernels size
    -    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
    -    # Loop over the small kernel to fill the big one
    -    for r in range(k_size):
    -        for c in range(k_size):
    -            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
-    # Crop the edges of the big kernel to drop near-zero values and reduce the run time of SR
    -    crop = k_size // 2
    -    cropped_big_k = big_k[crop:-crop, crop:-crop]
    -    # Normalize to 1
    -    return cropped_big_k / cropped_big_k.sum()
    -
    -
    -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    -    """ generate an anisotropic Gaussian kernel
    -    Args:
    -        ksize : e.g., 15, kernel size
    -        theta : [0,  pi], rotation angle range
    -        l1    : [0.1,50], scaling of eigenvalues
    -        l2    : [0.1,l1], scaling of eigenvalues
    -        If l1 = l2, will get an isotropic Gaussian kernel.
    -    Returns:
    -        k     : kernel
    -    """
    -
    -    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
    -    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    -    D = np.array([[l1, 0], [0, l2]])
    -    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    -    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
    -
    -    return k
    -
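-# Illustrative usage sketch (not part of the original file): with l1 == l2 the
-# covariance below is a scaled identity, so the kernel is isotropic; unequal
-# eigenvalues stretch it along the direction given by theta.
-#
-#   k_iso = anisotropic_Gaussian(ksize=15, theta=0.0, l1=6, l2=6)
-#   k_aniso = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=20, l2=2)
-#   assert abs(k_iso.sum() - 1.0) < 1e-6  # kernels are normalized to sum to 1
-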
    -
    -def gm_blur_kernel(mean, cov, size=15):
    -    center = size / 2.0 + 0.5
    -    k = np.zeros([size, size])
    -    for y in range(size):
    -        for x in range(size):
    -            cy = y - center + 1
    -            cx = x - center + 1
    -            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
    -
    -    k = k / np.sum(k)
    -    return k
    -
    -
    -def shift_pixel(x, sf, upper_left=True):
    -    """shift pixel for super-resolution with different scale factors
    -    Args:
    -        x: WxHxC or WxH
    -        sf: scale factor
    -        upper_left: shift direction
    -    """
    -    h, w = x.shape[:2]
    -    shift = (sf - 1) * 0.5
    -    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    -    if upper_left:
    -        x1 = xv + shift
    -        y1 = yv + shift
    -    else:
    -        x1 = xv - shift
    -        y1 = yv - shift
    -
    -    x1 = np.clip(x1, 0, w - 1)
    -    y1 = np.clip(y1, 0, h - 1)
    -
    -    if x.ndim == 2:
    -        x = interp2d(xv, yv, x)(x1, y1)
    -    if x.ndim == 3:
    -        for i in range(x.shape[-1]):
    -            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
    -
    -    return x
    -
    -
    -def blur(x, k):
    -    '''
    -    x: image, NxcxHxW
    -    k: kernel, Nx1xhxw
    -    '''
    -    n, c = x.shape[:2]
    -    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    -    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
    -    k = k.repeat(1, c, 1, 1)
    -    k = k.view(-1, 1, k.shape[2], k.shape[3])
    -    x = x.view(1, -1, x.shape[2], x.shape[3])
    -    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    -    x = x.view(n, c, x.shape[2], x.shape[3])
    -
    -    return x
    -
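-# Note on the grouped-conv trick above (illustrative comment, not part of the
-# original file): flattening the batch into the channel dimension and calling
-# conv2d with groups=n*c applies a different kernel to every sample, while the
-# channels within a sample share that sample's kernel.
-#
-#   x = torch.randn(4, 3, 32, 32)      # NxCxHxW
-#   k = torch.ones(4, 1, 5, 5) / 25.0  # one box kernel per sample
-#   y = blur(x, k)                     # y.shape == (4, 3, 32, 32)
-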
    -
    -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    -    """"
    -    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    -    # Kai Zhang
    -    # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
    -    # max_var = 2.5 * sf
    -    """
    -    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
    -    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    -    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    -    theta = np.random.rand() * np.pi  # random theta
    -    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
    -
    -    # Set COV matrix using Lambdas and Theta
    -    LAMBDA = np.diag([lambda_1, lambda_2])
    -    Q = np.array([[np.cos(theta), -np.sin(theta)],
    -                  [np.sin(theta), np.cos(theta)]])
    -    SIGMA = Q @ LAMBDA @ Q.T
    -    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
    -
    -    # Set expectation position (shifting kernel for aligned image)
    -    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
    -    MU = MU[None, None, :, None]
    -
    -    # Create meshgrid for Gaussian
    -    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
    -    Z = np.stack([X, Y], 2)[:, :, :, None]
    -
-    # Calculate the Gaussian for every pixel of the kernel
    -    ZZ = Z - MU
    -    ZZ_t = ZZ.transpose(0, 1, 3, 2)
    -    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
    -
    -    # shift the kernel so it will be centered
    -    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
    -
    -    # Normalize the kernel and return
    -    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
    -    kernel = raw_kernel / np.sum(raw_kernel)
    -    return kernel
    -
    -
    -def fspecial_gaussian(hsize, sigma):
    -    hsize = [hsize, hsize]
    -    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
    -    std = sigma
    -    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
    -    arg = -(x * x + y * y) / (2 * std * std)
    -    h = np.exp(arg)
-    h[h < np.finfo(float).eps * h.max()] = 0
    -    sumh = h.sum()
    -    if sumh != 0:
    -        h = h / sumh
    -    return h
    -
    -
    -def fspecial_laplacian(alpha):
    -    alpha = max([0, min([alpha, 1])])
    -    h1 = alpha / (alpha + 1)
    -    h2 = (1 - alpha) / (alpha + 1)
    -    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
    -    h = np.array(h)
    -    return h
    -
    -
    -def fspecial(filter_type, *args, **kwargs):
    -    '''
    -    python code from:
    -    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    -    '''
    -    if filter_type == 'gaussian':
    -        return fspecial_gaussian(*args, **kwargs)
    -    if filter_type == 'laplacian':
    -        return fspecial_laplacian(*args, **kwargs)
    -
    -
    -"""
    -# --------------------------------------------
    -# degradation models
    -# --------------------------------------------
    -"""
    -
    -
    -def bicubic_degradation(x, sf=3):
    -    '''
    -    Args:
    -        x: HxWxC image, [0, 1]
    -        sf: down-scale factor
    -    Return:
    -        bicubicly downsampled LR image
    -    '''
    -    x = util.imresize_np(x, scale=1 / sf)
    -    return x
    -
    -
    -def srmd_degradation(x, k, sf=3):
    -    ''' blur + bicubic downsampling
    -    Args:
    -        x: HxWxC image, [0, 1]
    -        k: hxw, double
    -        sf: down-scale factor
    -    Return:
    -        downsampled LR image
    -    Reference:
    -        @inproceedings{zhang2018learning,
    -          title={Learning a single convolutional super-resolution network for multiple degradations},
    -          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
    -          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
    -          pages={3262--3271},
    -          year={2018}
    -        }
    -    '''
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    -    x = bicubic_degradation(x, sf=sf)
    -    return x
    -
    -
    -def dpsr_degradation(x, k, sf=3):
    -    ''' bicubic downsampling + blur
    -    Args:
    -        x: HxWxC image, [0, 1]
    -        k: hxw, double
    -        sf: down-scale factor
    -    Return:
    -        downsampled LR image
    -    Reference:
    -        @inproceedings{zhang2019deep,
    -          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
    -          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
    -          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
    -          pages={1671--1681},
    -          year={2019}
    -        }
    -    '''
    -    x = bicubic_degradation(x, sf=sf)
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    -    return x
    -
    -
    -def classical_degradation(x, k, sf=3):
    -    ''' blur + downsampling
    -    Args:
    -        x: HxWxC image, [0, 1]/[0, 255]
    -        k: hxw, double
    -        sf: down-scale factor
    -    Return:
    -        downsampled LR image
    -    '''
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    -    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    -    st = 0
    -    return x[st::sf, st::sf, ...]
    -
    -
    -def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    -    """USM sharpening. borrowed from real-ESRGAN
    -    Input image: I; Blurry image: B.
    -    1. K = I + weight * (I - B)
    -    2. Mask = 1 if abs(I - B) > threshold, else: 0
    -    3. Blur mask:
    -    4. Out = Mask * K + (1 - Mask) * I
    -    Args:
    -        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
-        weight (float): Sharp weight. Default: 0.5.
-        radius (float): Kernel size of the Gaussian blur. Default: 50.
-        threshold (int): Residual threshold (on a 0-255 scale) for building the mask. Default: 10.
    -    """
    -    if radius % 2 == 0:
    -        radius += 1
    -    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    -    residual = img - blur
    -    mask = np.abs(residual) * 255 > threshold
    -    mask = mask.astype('float32')
    -    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
    -
    -    K = img + weight * residual
    -    K = np.clip(K, 0, 1)
    -    return soft_mask * K + (1 - soft_mask) * img
    -
    -
    -def add_blur(img, sf=4):
    -    wd2 = 4.0 + sf
    -    wd = 2.0 + 0.2 * sf
    -    if random.random() < 0.5:
    -        l1 = wd2 * random.random()
    -        l2 = wd2 * random.random()
    -        k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
    -    else:
    -        k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
-    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
    -
    -    return img
    -
    -
    -def add_resize(img, sf=4):
    -    rnum = np.random.rand()
    -    if rnum > 0.8:  # up
    -        sf1 = random.uniform(1, 2)
    -    elif rnum < 0.7:  # down
    -        sf1 = random.uniform(0.5 / sf, 1)
    -    else:
    -        sf1 = 1.0
    -    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
    -    img = np.clip(img, 0.0, 1.0)
    -
    -    return img
    -
    -
    -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    -#     noise_level = random.randint(noise_level1, noise_level2)
    -#     rnum = np.random.rand()
    -#     if rnum > 0.6:  # add color Gaussian noise
    -#         img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    -#     elif rnum < 0.4:  # add grayscale Gaussian noise
    -#         img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    -#     else:  # add  noise
    -#         L = noise_level2 / 255.
    -#         D = np.diag(np.random.rand(3))
    -#         U = orth(np.random.rand(3, 3))
    -#         conv = np.dot(np.dot(np.transpose(U), D), U)
    -#         img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    -#     img = np.clip(img, 0.0, 1.0)
    -#     return img
    -
    -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    -    noise_level = random.randint(noise_level1, noise_level2)
    -    rnum = np.random.rand()
    -    if rnum > 0.6:  # add color Gaussian noise
    -        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    -    elif rnum < 0.4:  # add grayscale Gaussian noise
    -        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-    else:  # add channel-correlated color noise
    -        L = noise_level2 / 255.
    -        D = np.diag(np.random.rand(3))
    -        U = orth(np.random.rand(3, 3))
    -        conv = np.dot(np.dot(np.transpose(U), D), U)
    -        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    -    img = np.clip(img, 0.0, 1.0)
    -    return img
    -
    -
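The final branch of add_Gaussian_noise draws correlated color noise with covariance L^2 * U^T D U, which is symmetric positive semi-definite by construction (D is diagonal with non-negative entries and U has orthonormal columns). A quick sanity-check sketch:

import numpy as np
from scipy.linalg import orth

D = np.diag(np.random.rand(3))        # non-negative diagonal
U = orth(np.random.rand(3, 3))        # orthonormal columns
conv = U.T @ D @ U
assert np.allclose(conv, conv.T)                     # symmetric
assert np.all(np.linalg.eigvalsh(conv) >= -1e-12)    # eigenvalues non-negative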
    -def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    -    noise_level = random.randint(noise_level1, noise_level2)
    -    img = np.clip(img, 0.0, 1.0)
    -    rnum = random.random()
    -    if rnum > 0.6:
-        img = img + img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    -    elif rnum < 0.4:
-        img = img + img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    -    else:
    -        L = noise_level2 / 255.
    -        D = np.diag(np.random.rand(3))
    -        U = orth(np.random.rand(3, 3))
    -        conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    -    img = np.clip(img, 0.0, 1.0)
    -    return img
    -
    -
    -def add_Poisson_noise(img):
    -    img = np.clip((img * 255.0).round(), 0, 255) / 255.
-    vals = 10 ** (2 * random.random() + 2.0)  # exponent in [2, 4], so vals in [1e2, 1e4]
    -    if random.random() < 0.5:
    -        img = np.random.poisson(img * vals).astype(np.float32) / vals
    -    else:
-        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])  # BT.601 luma weights
    -        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
    -        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
-        img = img + noise_gray[:, :, np.newaxis]
    -    img = np.clip(img, 0.0, 1.0)
    -    return img
    -
    -
    -def add_JPEG_noise(img):
    -    quality_factor = random.randint(30, 95)
    -    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    -    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    -    img = cv2.imdecode(encimg, 1)
    -    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
    -    return img
    -
    -
    -def random_crop(lq, hq, sf=4, lq_patchsize=64):
    -    h, w = lq.shape[:2]
    -    rnd_h = random.randint(0, h - lq_patchsize)
    -    rnd_w = random.randint(0, w - lq_patchsize)
    -    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
    -
    -    rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
    -    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
    -    return lq, hq
    -
    -
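A shape-level sketch of the paired crop above, with random arrays standing in for real images:

import numpy as np

lq = np.random.rand(128, 128, 3)   # low-quality image
hq = np.random.rand(512, 512, 3)   # aligned high-quality image, sf=4 times larger
lq_p, hq_p = random_crop(lq, hq, sf=4, lq_patchsize=64)
print(lq_p.shape, hq_p.shape)      # (64, 64, 3) (256, 256, 3)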
    -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    -    """
    -    This is the degradation model of BSRGAN from the paper
    -    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    -    ----------
-    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
    -    sf: scale factor
    -    isp_model: camera ISP model
    -    Returns
    -    -------
    -    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    -    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    -    """
    -    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    -    sf_ori = sf
    -
    -    h1, w1 = img.shape[:2]
-    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is height, axis 1 is width)
    -    h, w = img.shape[:2]
    -
    -    if h < lq_patchsize * sf or w < lq_patchsize * sf:
    -        raise ValueError(f'img size ({h1}X{w1}) is too small!')
    -
    -    hq = img.copy()
    -
    -    if sf == 4 and random.random() < scale2_prob:  # downsample1
    -        if np.random.rand() < 0.5:
    -            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
    -                             interpolation=random.choice([1, 2, 3]))
    -        else:
    -            img = util.imresize_np(img, 1 / 2, True)
    -        img = np.clip(img, 0.0, 1.0)
    -        sf = 2
    -
    -    shuffle_order = random.sample(range(7), 7)
    -    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # ensure downsample2 runs before downsample3, which reuses the shape recorded there
    -        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
    -
    -    for i in shuffle_order:
    -
    -        if i == 0:
    -            img = add_blur(img, sf=sf)
    -
    -        elif i == 1:
    -            img = add_blur(img, sf=sf)
    -
    -        elif i == 2:
    -            a, b = img.shape[1], img.shape[0]
    -            # downsample2
    -            if random.random() < 0.75:
    -                sf1 = random.uniform(1, 2 * sf)
    -                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
    -                                 interpolation=random.choice([1, 2, 3]))
    -            else:
    -                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
    -                k_shifted = shift_pixel(k, sf)
    -                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
    -                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
    -                img = img[0::sf, 0::sf, ...]  # nearest downsampling
    -            img = np.clip(img, 0.0, 1.0)
    -
    -        elif i == 3:
    -            # downsample3
    -            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
    -            img = np.clip(img, 0.0, 1.0)
    -
    -        elif i == 4:
    -            # add Gaussian noise
    -            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
    -
    -        elif i == 5:
    -            # add JPEG noise
    -            if random.random() < jpeg_prob:
    -                img = add_JPEG_noise(img)
    -
    -        elif i == 6:
    -            # add processed camera sensor noise
    -            if random.random() < isp_prob and isp_model is not None:
    -                with torch.no_grad():
    -                    img, hq = isp_model.forward(img.copy(), hq)
    -
    -    # add final JPEG compression noise
    -    img = add_JPEG_noise(img)
    -
    -    # random crop
    -    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
    -
    -    return img, hq
    -
    -
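A usage sketch for degradation_bsrgan, assuming this module's helpers (util, fspecial, etc.) are importable, with a random array standing in for a real float32 RGB image in [0, 1]:

import numpy as np

hq_img = np.random.rand(512, 512, 3).astype(np.float32)  # must be at least lq_patchsize * sf per side
lq, hq = degradation_bsrgan(hq_img, sf=4, lq_patchsize=72)
print(lq.shape, hq.shape)  # (72, 72, 3) (288, 288, 3)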
-# TODO: the isp_model branch is commented out in this variant
    -def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    -    """
    -    This is the degradation model of BSRGAN from the paper
    -    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-    ----------
-    image: HxWxC uint8 input image
-    sf: scale factor
-    isp_model: camera ISP model (unused; the corresponding branch is commented out)
-    Returns
-    -------
-    example: dict with key "image" holding the degraded uint8 image
    -    """
    -    image = util.uint2single(image)
    -    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    -    sf_ori = sf
    -
    -    h1, w1 = image.shape[:2]
-    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (axis 0 is height, axis 1 is width)
    -    h, w = image.shape[:2]
    -
    -    hq = image.copy()
    -
    -    if sf == 4 and random.random() < scale2_prob:  # downsample1
    -        if np.random.rand() < 0.5:
    -            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
    -                               interpolation=random.choice([1, 2, 3]))
    -        else:
    -            image = util.imresize_np(image, 1 / 2, True)
    -        image = np.clip(image, 0.0, 1.0)
    -        sf = 2
    -
    -    shuffle_order = random.sample(range(7), 7)
    -    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # ensure downsample2 runs before downsample3, which reuses the shape recorded there
    -        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
    -
    -    for i in shuffle_order:
    -
    -        if i == 0:
    -            image = add_blur(image, sf=sf)
    -
    -        elif i == 1:
    -            image = add_blur(image, sf=sf)
    -
    -        elif i == 2:
    -            a, b = image.shape[1], image.shape[0]
    -            # downsample2
    -            if random.random() < 0.75:
    -                sf1 = random.uniform(1, 2 * sf)
    -                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
    -                                   interpolation=random.choice([1, 2, 3]))
    -            else:
    -                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
    -                k_shifted = shift_pixel(k, sf)
    -                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
    -                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
    -                image = image[0::sf, 0::sf, ...]  # nearest downsampling
    -            image = np.clip(image, 0.0, 1.0)
    -
    -        elif i == 3:
    -            # downsample3
    -            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
    -            image = np.clip(image, 0.0, 1.0)
    -
    -        elif i == 4:
    -            # add Gaussian noise
    -            image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
    -
    -        elif i == 5:
    -            # add JPEG noise
    -            if random.random() < jpeg_prob:
    -                image = add_JPEG_noise(image)
    -
    -        # elif i == 6:
    -        #     # add processed camera sensor noise
    -        #     if random.random() < isp_prob and isp_model is not None:
    -        #         with torch.no_grad():
    -        #             img, hq = isp_model.forward(img.copy(), hq)
    -
    -    # add final JPEG compression noise
    -    image = add_JPEG_noise(image)
    -    image = util.single2uint(image)
    -    example = {"image":image}
    -    return example
    -
    -
-# NOTE: "a += x" is written as "a = a + x" in the noise functions above to avoid pickle errors with read-only arrays
    -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
    -    """
    -    This is an extended degradation model by combining
    -    the degradation models of BSRGAN and Real-ESRGAN
    -    ----------
-    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
-    sf: scale factor
-    shuffle_prob: probability of fully shuffling the degradation order
-    use_sharp: whether to sharpen the image first
    -    Returns
    -    -------
    -    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    -    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    -    """
    -
    -    h1, w1 = img.shape[:2]
    -    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    -    h, w = img.shape[:2]
    -
    -    if h < lq_patchsize * sf or w < lq_patchsize * sf:
    -        raise ValueError(f'img size ({h1}X{w1}) is too small!')
    -
    -    if use_sharp:
    -        img = add_sharpening(img)
    -    hq = img.copy()
    -
    -    if random.random() < shuffle_prob:
    -        shuffle_order = random.sample(range(13), 13)
    -    else:
    -        shuffle_order = list(range(13))
    -        # local shuffle for noise, JPEG is always the last one
    -        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
    -        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
    -
    -    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
    -
    -    for i in shuffle_order:
    -        if i == 0:
    -            img = add_blur(img, sf=sf)
    -        elif i == 1:
    -            img = add_resize(img, sf=sf)
    -        elif i == 2:
    -            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
    -        elif i == 3:
    -            if random.random() < poisson_prob:
    -                img = add_Poisson_noise(img)
    -        elif i == 4:
    -            if random.random() < speckle_prob:
    -                img = add_speckle_noise(img)
    -        elif i == 5:
    -            if random.random() < isp_prob and isp_model is not None:
    -                with torch.no_grad():
    -                    img, hq = isp_model.forward(img.copy(), hq)
    -        elif i == 6:
    -            img = add_JPEG_noise(img)
    -        elif i == 7:
    -            img = add_blur(img, sf=sf)
    -        elif i == 8:
    -            img = add_resize(img, sf=sf)
    -        elif i == 9:
    -            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
    -        elif i == 10:
    -            if random.random() < poisson_prob:
    -                img = add_Poisson_noise(img)
    -        elif i == 11:
    -            if random.random() < speckle_prob:
    -                img = add_speckle_noise(img)
    -        elif i == 12:
    -            if random.random() < isp_prob and isp_model is not None:
    -                with torch.no_grad():
    -                    img, hq = isp_model.forward(img.copy(), hq)
    -        else:
    -            print('check the shuffle!')
    -
    -    # resize to desired size
    -    img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
    -                     interpolation=random.choice([1, 2, 3]))
    -
    -    # add final JPEG compression noise
    -    img = add_JPEG_noise(img)
    -
    -    # random crop
    -    img, hq = random_crop(img, hq, sf, lq_patchsize)
    -
    -    return img, hq
    -
    -
-if __name__ == '__main__':
-	img = util.imread_uint('utils/test.png', 3)  # uint8, HxWxC
-	img = img[:448, :448]
-	img_hq = util.uint2single(img.copy())  # keep the clean crop as the high-quality reference
-	h = img.shape[0] // 4
-	print("resizing to", h)
-	sf = 4
-	deg_fn = partial(degradation_bsrgan_variant, sf=sf)
-	for i in range(20):
-		print(i)
-		img_lq = util.uint2single(deg_fn(img)["image"])  # the variant expects uint8 and returns a dict with a uint8 image
-		img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
-		print(img_lq.shape)
-		print("bicubic", img_lq_bicubic.shape)
-		lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-		                        interpolation=0)
-		lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-		                        interpolation=0)
-		img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
-		util.imsave(img_concat, str(i) + '.png')
    -
    -
    diff --git a/spaces/Ishayy/space_1/app.py b/spaces/Ishayy/space_1/app.py
    deleted file mode 100644
    index 57b7b791807d111f20403524a5d21fe94bdf2b5c..0000000000000000000000000000000000000000
    --- a/spaces/Ishayy/space_1/app.py
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -from transformers import PegasusForConditionalGeneration,PegasusTokenizer
    -import gradio as grad
    -mdl_name = "google/pegasus-xsum"
    -pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
    -mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
-def summarize(text):
-    tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
-    txt_summary = mdl.generate(**tokens)
-    response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
-    return response
-txt = grad.Textbox(lines=10, label="English", placeholder="English text here")
-out = grad.Textbox(lines=10, label="Summary")
    -grad.Interface(summarize, inputs=txt, outputs=out).launch()
    \ No newline at end of file
    diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/archs/arch_util.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/archs/arch_util.py
    deleted file mode 100644
    index bad45ab34e901c47fb539152fca714a3795b0de2..0000000000000000000000000000000000000000
    --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/archs/arch_util.py
    +++ /dev/null
    @@ -1,318 +0,0 @@
    -import collections.abc
    -import math
    -import torch
    -import torchvision
    -import warnings
    -from distutils.version import LooseVersion
    -from itertools import repeat
    -from torch import nn as nn
    -from torch.nn import functional as F
    -from torch.nn import init as init
    -from torch.nn.modules.batchnorm import _BatchNorm
    -
    -from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv
    -from basicsr.utils import get_root_logger
    -
    -
    -@torch.no_grad()
    -def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
    -    """Initialize network weights.
    -
    -    Args:
    -        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
    -        scale (float): Scale initialized weights, especially for residual
    -            blocks. Default: 1.
    -        bias_fill (float): The value to fill bias. Default: 0
    -        kwargs (dict): Other arguments for initialization function.
    -    """
    -    if not isinstance(module_list, list):
    -        module_list = [module_list]
    -    for module in module_list:
    -        for m in module.modules():
    -            if isinstance(m, nn.Conv2d):
    -                init.kaiming_normal_(m.weight, **kwargs)
    -                m.weight.data *= scale
    -                if m.bias is not None:
    -                    m.bias.data.fill_(bias_fill)
    -            elif isinstance(m, nn.Linear):
    -                init.kaiming_normal_(m.weight, **kwargs)
    -                m.weight.data *= scale
    -                if m.bias is not None:
    -                    m.bias.data.fill_(bias_fill)
    -            elif isinstance(m, _BatchNorm):
    -                init.constant_(m.weight, 1)
    -                if m.bias is not None:
    -                    m.bias.data.fill_(bias_fill)
    -
    -
    -def make_layer(basic_block, num_basic_block, **kwarg):
    -    """Make layers by stacking the same blocks.
    -
    -    Args:
    -        basic_block (nn.module): nn.module class for basic block.
    -        num_basic_block (int): number of blocks.
    -
    -    Returns:
    -        nn.Sequential: Stacked blocks in nn.Sequential.
    -    """
    -    layers = []
    -    for _ in range(num_basic_block):
    -        layers.append(basic_block(**kwarg))
    -    return nn.Sequential(*layers)
    -
    -
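For example, make_layer can stack the ResidualBlockNoBN defined just below into a fixed-depth trunk; a minimal sketch:

import torch

trunk = make_layer(ResidualBlockNoBN, num_basic_block=16, num_feat=64)
x = torch.randn(1, 64, 32, 32)
print(trunk(x).shape)  # torch.Size([1, 64, 32, 32]), residual blocks preserve the shape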
    -class ResidualBlockNoBN(nn.Module):
    -    """Residual block without BN.
    -
    -    It has a style of:
    -        ---Conv-ReLU-Conv-+-
    -         |________________|
    -
    -    Args:
    -        num_feat (int): Channel number of intermediate features.
    -            Default: 64.
    -        res_scale (float): Residual scale. Default: 1.
    -        pytorch_init (bool): If set to True, use pytorch default init,
    -            otherwise, use default_init_weights. Default: False.
    -    """
    -
    -    def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
    -        super(ResidualBlockNoBN, self).__init__()
    -        self.res_scale = res_scale
    -        self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
    -        self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
    -        self.relu = nn.ReLU(inplace=True)
    -
    -        if not pytorch_init:
    -            default_init_weights([self.conv1, self.conv2], 0.1)
    -
    -    def forward(self, x):
    -        identity = x
    -        out = self.conv2(self.relu(self.conv1(x)))
    -        return identity + out * self.res_scale
    -
    -
    -class Upsample(nn.Sequential):
    -    """Upsample module.
    -
    -    Args:
    -        scale (int): Scale factor. Supported scales: 2^n and 3.
    -        num_feat (int): Channel number of intermediate features.
    -    """
    -
    -    def __init__(self, scale, num_feat):
    -        m = []
    -        if (scale & (scale - 1)) == 0:  # scale = 2^n
    -            for _ in range(int(math.log(scale, 2))):
    -                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
    -                m.append(nn.PixelShuffle(2))
    -        elif scale == 3:
    -            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
    -            m.append(nn.PixelShuffle(3))
    -        else:
    -            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
    -        super(Upsample, self).__init__(*m)
    -
    -
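A shape sketch for the Upsample module: scale=4 decomposes into two Conv + PixelShuffle(2) stages, so spatial size quadruples while the channel count is preserved:

import torch

up = Upsample(scale=4, num_feat=64)
x = torch.randn(1, 64, 24, 24)
print(up(x).shape)  # torch.Size([1, 64, 96, 96])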
    -def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
    -    """Warp an image or feature map with optical flow.
    -
    -    Args:
    -        x (Tensor): Tensor with size (n, c, h, w).
    -        flow (Tensor): Tensor with size (n, h, w, 2), normal value.
    -        interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
    -        padding_mode (str): 'zeros' or 'border' or 'reflection'.
    -            Default: 'zeros'.
    -        align_corners (bool): Before pytorch 1.3, the default value is
    -            align_corners=True. After pytorch 1.3, the default value is
-            align_corners=False. Here, we use True as the default.
    -
    -    Returns:
    -        Tensor: Warped image or feature map.
    -    """
    -    assert x.size()[-2:] == flow.size()[1:3]
    -    _, _, h, w = x.size()
    -    # create mesh grid
    -    grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
    -    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2
    -    grid.requires_grad = False
    -
    -    vgrid = grid + flow
    -    # scale grid to [-1,1]
    -    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
    -    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
    -    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    -    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)
    -
    -    # TODO, what if align_corners=False
    -    return output
    -
    -
    -def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
    -    """Resize a flow according to ratio or shape.
    -
    -    Args:
    -        flow (Tensor): Precomputed flow. shape [N, 2, H, W].
    -        size_type (str): 'ratio' or 'shape'.
    -        sizes (list[int | float]): the ratio for resizing or the final output
    -            shape.
    -            1) The order of ratio should be [ratio_h, ratio_w]. For
    -            downsampling, the ratio should be smaller than 1.0 (i.e., ratio
    -            < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
    -            ratio > 1.0).
    -            2) The order of output_size should be [out_h, out_w].
    -        interp_mode (str): The mode of interpolation for resizing.
    -            Default: 'bilinear'.
    -        align_corners (bool): Whether align corners. Default: False.
    -
    -    Returns:
    -        Tensor: Resized flow.
    -    """
    -    _, _, flow_h, flow_w = flow.size()
    -    if size_type == 'ratio':
    -        output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
    -    elif size_type == 'shape':
    -        output_h, output_w = sizes[0], sizes[1]
    -    else:
    -        raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')
    -
    -    input_flow = flow.clone()
    -    ratio_h = output_h / flow_h
    -    ratio_w = output_w / flow_w
    -    input_flow[:, 0, :, :] *= ratio_w
    -    input_flow[:, 1, :, :] *= ratio_h
    -    resized_flow = F.interpolate(
    -        input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
    -    return resized_flow
    -
    -
    -# TODO: may write a cpp file
    -def pixel_unshuffle(x, scale):
    -    """ Pixel unshuffle.
    -
    -    Args:
    -        x (Tensor): Input feature with shape (b, c, hh, hw).
    -        scale (int): Downsample ratio.
    -
    -    Returns:
    -        Tensor: the pixel unshuffled feature.
    -    """
    -    b, c, hh, hw = x.size()
    -    out_channel = c * (scale**2)
    -    assert hh % scale == 0 and hw % scale == 0
    -    h = hh // scale
    -    w = hw // scale
    -    x_view = x.view(b, c, h, scale, w, scale)
    -    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
    -
    -
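A quick shape check for pixel_unshuffle: a scale of 2 moves each 2x2 spatial block into the channel dimension:

import torch

x = torch.randn(1, 3, 64, 64)
y = pixel_unshuffle(x, scale=2)
print(y.shape)  # torch.Size([1, 12, 32, 32]): channels x4, spatial /2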
    -class DCNv2Pack(ModulatedDeformConvPack):
    -    """Modulated deformable conv for deformable alignment.
    -
    -    Different from the official DCNv2Pack, which generates offsets and masks
    -    from the preceding features, this DCNv2Pack takes another different
    -    features to generate offsets and masks.
    -
    -    Ref:
    -        Delving Deep into Deformable Alignment in Video Super-Resolution.
    -    """
    -
    -    def forward(self, x, feat):
    -        out = self.conv_offset(feat)
    -        o1, o2, mask = torch.chunk(out, 3, dim=1)
    -        offset = torch.cat((o1, o2), dim=1)
    -        mask = torch.sigmoid(mask)
    -
    -        offset_absmean = torch.mean(torch.abs(offset))
    -        if offset_absmean > 50:
    -            logger = get_root_logger()
    -            logger.warning(f'Offset abs mean is {offset_absmean}, larger than 50.')
    -
    -        if LooseVersion(torchvision.__version__) >= LooseVersion('0.9.0'):
    -            return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
    -                                                 self.dilation, mask)
    -        else:
    -            return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding,
    -                                         self.dilation, self.groups, self.deformable_groups)
    -
    -
    -def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    -    # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    -    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    -    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    -    def norm_cdf(x):
    -        # Computes standard normal cumulative distribution function
    -        return (1. + math.erf(x / math.sqrt(2.))) / 2.
    -
    -    if (mean < a - 2 * std) or (mean > b + 2 * std):
    -        warnings.warn(
    -            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
    -            'The distribution of values may be incorrect.',
    -            stacklevel=2)
    -
    -    with torch.no_grad():
    -        # Values are generated by using a truncated uniform distribution and
    -        # then using the inverse CDF for the normal distribution.
    -        # Get upper and lower cdf values
    -        low = norm_cdf((a - mean) / std)
    -        up = norm_cdf((b - mean) / std)
    -
    -        # Uniformly fill tensor with values from [low, up], then translate to
    -        # [2l-1, 2u-1].
    -        tensor.uniform_(2 * low - 1, 2 * up - 1)
    -
    -        # Use inverse cdf transform for normal distribution to get truncated
    -        # standard normal
    -        tensor.erfinv_()
    -
    -        # Transform to proper mean, std
    -        tensor.mul_(std * math.sqrt(2.))
    -        tensor.add_(mean)
    -
    -        # Clamp to ensure it's in the proper range
    -        tensor.clamp_(min=a, max=b)
    -        return tensor
    -
    -
    -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    -    r"""Fills the input Tensor with values drawn from a truncated
    -    normal distribution.
    -
    -    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    -
    -    The values are effectively drawn from the
    -    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    -    with values outside :math:`[a, b]` redrawn until they are within
    -    the bounds. The method used for generating the random values works
    -    best when :math:`a \leq \text{mean} \leq b`.
    -
    -    Args:
    -        tensor: an n-dimensional `torch.Tensor`
    -        mean: the mean of the normal distribution
    -        std: the standard deviation of the normal distribution
    -        a: the minimum cutoff value
    -        b: the maximum cutoff value
    -
    -    Examples:
    -        >>> w = torch.empty(3, 5)
    -        >>> nn.init.trunc_normal_(w)
    -    """
    -    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
    -
    -
    -# From PyTorch
    -def _ntuple(n):
    -
    -    def parse(x):
    -        if isinstance(x, collections.abc.Iterable):
    -            return x
    -        return tuple(repeat(x, n))
    -
    -    return parse
    -
    -
    -to_1tuple = _ntuple(1)
    -to_2tuple = _ntuple(2)
    -to_3tuple = _ntuple(3)
    -to_4tuple = _ntuple(4)
    -to_ntuple = _ntuple
    \ No newline at end of file
    diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/webui_locale.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/webui_locale.py
    deleted file mode 100644
    index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000
    --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/webui_locale.py
    +++ /dev/null
    @@ -1,26 +0,0 @@
    -import os
    -import locale
    -import commentjson as json
    -
    -class I18nAuto:
    -    def __init__(self):
    -        if os.path.exists("config.json"):
    -            with open("config.json", "r", encoding='utf-8') as f:
    -                config = json.load(f)
    -        else:
    -            config = {}
    -        lang_config = config.get("language", "auto")
    -        language = os.environ.get("LANGUAGE", lang_config)
    -        if language == "auto":
-            language = locale.getdefaultlocale()[0]  # get the system language code (e.g. zh_CN)
    -        self.language_map = {}
    -        self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
    -        if self.file_is_exists:
    -            with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
    -                self.language_map.update(json.load(f))
    -
    -    def __call__(self, key):
    -        if self.file_is_exists and key in self.language_map:
    -            return self.language_map[key]
    -        else:
    -            return key
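A usage sketch for I18nAuto, assuming commentjson is installed and a translation file such as ./locale/zh_CN.json exists (hypothetical path); unknown keys fall through unchanged:

i18n = I18nAuto()
print(i18n("Submit"))  # the translated string if the key exists, otherwise "Submit" itself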
    diff --git a/spaces/JonatanGk/catalonia-independence-detector/README.md b/spaces/JonatanGk/catalonia-independence-detector/README.md
    deleted file mode 100644
    index ae3fc614e3a7fb00959628c8c13bf0c01af53d52..0000000000000000000000000000000000000000
    --- a/spaces/JonatanGk/catalonia-independence-detector/README.md
    +++ /dev/null
    @@ -1,11 +0,0 @@
    ----
    -title: Catalonia Independence Detector
    -emoji: 👎👍
    -colorFrom: yellow
    -colorTo: pink
    -sdk: gradio
    -app_file: app.py
    -pinned: false
    ----
    -
    -
    diff --git a/spaces/JosueElias/borrs/README.md b/spaces/JosueElias/borrs/README.md
    deleted file mode 100644
    index cabae4713f9fa43d69dd46574a3ff67261011667..0000000000000000000000000000000000000000
    --- a/spaces/JosueElias/borrs/README.md
    +++ /dev/null
    @@ -1,12 +0,0 @@
    ----
    -title: Borrs
    -emoji: 🔥
    -colorFrom: yellow
    -colorTo: purple
    -sdk: streamlit
    -sdk_version: 1.28.0
    -app_file: app.py
    -pinned: false
    ----
    -
    -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
    diff --git a/spaces/KarmKarma/genshinimpact-rvc-models-v2/vc_infer_pipeline.py b/spaces/KarmKarma/genshinimpact-rvc-models-v2/vc_infer_pipeline.py
    deleted file mode 100644
    index 1b70d7b0de4e8bccbdd146ac90a5cfe42ed8749c..0000000000000000000000000000000000000000
    --- a/spaces/KarmKarma/genshinimpact-rvc-models-v2/vc_infer_pipeline.py
    +++ /dev/null
    @@ -1,431 +0,0 @@
-import numpy as np, parselmouth, torch
-from time import time as ttime
-import torch.nn.functional as F
-import pyworld, os, traceback, faiss, librosa, torchcrepe
-from scipy import signal
-from functools import lru_cache
    -
    -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
    -
    -input_audio_path2wav = {}
    -
    -
    -@lru_cache
    -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
    -    audio = input_audio_path2wav[input_audio_path]
    -    f0, t = pyworld.harvest(
    -        audio,
    -        fs=fs,
    -        f0_ceil=f0max,
    -        f0_floor=f0min,
    -        frame_period=frame_period,
    -    )
    -    f0 = pyworld.stonemask(audio, f0, t, fs)
    -    return f0
    -
    -
-def change_rms(data1, sr1, data2, sr2, rate):  # data1: input audio, data2: output audio, rate: weight of data2's own envelope
    -    # print(data1.max(),data2.max())
    -    rms1 = librosa.feature.rms(
    -        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
-    )  # one RMS point every half second
    -    rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
    -    rms1 = torch.from_numpy(rms1)
    -    rms1 = F.interpolate(
    -        rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
    -    ).squeeze()
    -    rms2 = torch.from_numpy(rms2)
    -    rms2 = F.interpolate(
    -        rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
    -    ).squeeze()
    -    rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
    -    data2 *= (
    -        torch.pow(rms1, torch.tensor(1 - rate))
    -        * torch.pow(rms2, torch.tensor(rate - 1))
    -    ).numpy()
    -    return data2
    -
    -
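In change_rms the per-frame scaling factor is rms1^(1-rate) * rms2^(rate-1): at rate=1 it equals 1 and data2 passes through untouched, while at rate=0 it becomes rms1/rms2, fully transferring data1's loudness envelope onto data2. A minimal sketch with synthetic audio, assuming librosa and torch are installed:

import numpy as np

sr = 16000
quiet = 0.1 * np.random.randn(sr).astype(np.float32)   # stand-in input audio
loud = np.random.randn(sr).astype(np.float32)          # stand-in output audio
mixed = change_rms(quiet, sr, loud.copy(), sr, rate=0.0)  # adopt the quiet envelope
print(np.abs(mixed).mean() < np.abs(loud).mean())      # True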
    -class VC(object):
    -    def __init__(self, tgt_sr, config):
    -        self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
    -            config.x_pad,
    -            config.x_query,
    -            config.x_center,
    -            config.x_max,
    -            config.is_half,
    -        )
-        self.sr = 16000  # HuBERT input sampling rate
-        self.window = 160  # samples per frame
-        self.t_pad = self.sr * self.x_pad  # padding length before and after each clip
-        self.t_pad_tgt = tgt_sr * self.x_pad
-        self.t_pad2 = self.t_pad * 2
-        self.t_query = self.sr * self.x_query  # search window around each candidate cut point
-        self.t_center = self.sr * self.x_center  # spacing between cut-point queries
-        self.t_max = self.sr * self.x_max  # duration threshold below which no cut-point search is needed
    -        self.device = config.device
    -
    -    def get_f0(
    -        self,
    -        input_audio_path,
    -        x,
    -        p_len,
    -        f0_up_key,
    -        f0_method,
    -        filter_radius,
    -        inp_f0=None,
    -    ):
    -        global input_audio_path2wav
    -        time_step = self.window / self.sr * 1000
    -        f0_min = 50
    -        f0_max = 1100
    -        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    -        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
    -        if f0_method == "pm":
    -            f0 = (
    -                parselmouth.Sound(x, self.sr)
    -                .to_pitch_ac(
    -                    time_step=time_step / 1000,
    -                    voicing_threshold=0.6,
    -                    pitch_floor=f0_min,
    -                    pitch_ceiling=f0_max,
    -                )
    -                .selected_array["frequency"]
    -            )
    -            pad_size = (p_len - len(f0) + 1) // 2
    -            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
    -                f0 = np.pad(
    -                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
    -                )
    -        elif f0_method == "harvest":
    -            input_audio_path2wav[input_audio_path] = x.astype(np.double)
    -            f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
    -            if filter_radius > 2:
    -                f0 = signal.medfilt(f0, 3)
    -        elif f0_method == "crepe":
    -            model = "full"
    -            # Pick a batch size that doesn't cause memory errors on your gpu
    -            batch_size = 512
    -            # Compute pitch using first gpu
    -            audio = torch.tensor(np.copy(x))[None].float()
    -            f0, pd = torchcrepe.predict(
    -                audio,
    -                self.sr,
    -                self.window,
    -                f0_min,
    -                f0_max,
    -                model,
    -                batch_size=batch_size,
    -                device=self.device,
    -                return_periodicity=True,
    -            )
    -            pd = torchcrepe.filter.median(pd, 3)
    -            f0 = torchcrepe.filter.mean(f0, 3)
    -            f0[pd < 0.1] = 0
    -            f0 = f0[0].cpu().numpy()
    -        f0 *= pow(2, f0_up_key / 12)
    -        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # f0 points per second
    -        if inp_f0 is not None:
    -            delta_t = np.round(
    -                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
    -            ).astype("int16")
    -            replace_f0 = np.interp(
    -                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
    -            )
    -            shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
    -            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
    -                :shape
    -            ]
    -        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
    -        f0bak = f0.copy()
    -        f0_mel = 1127 * np.log(1 + f0 / 700)
    -        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
    -            f0_mel_max - f0_mel_min
    -        ) + 1
    -        f0_mel[f0_mel <= 1] = 1
    -        f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy 1.24
-        return f0_coarse, f0bak
    -
    -    def vc(
    -        self,
    -        model,
    -        net_g,
    -        sid,
    -        audio0,
    -        pitch,
    -        pitchf,
    -        times,
    -        index,
    -        big_npy,
    -        index_rate,
    -        version,
    -        protect,
    -    ):  # ,file_index,file_big_npy
    -        feats = torch.from_numpy(audio0)
    -        if self.is_half:
    -            feats = feats.half()
    -        else:
    -            feats = feats.float()
    -        if feats.dim() == 2:  # double channels
    -            feats = feats.mean(-1)
    -        assert feats.dim() == 1, feats.dim()
    -        feats = feats.view(1, -1)
    -        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
    -
    -        inputs = {
    -            "source": feats.to(self.device),
    -            "padding_mask": padding_mask,
    -            "output_layer": 9 if version == "v1" else 12,
    -        }
    -        t0 = ttime()
    -        with torch.no_grad():
    -            logits = model.extract_features(**inputs)
    -            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
-        if protect < 0.5 and pitch is not None and pitchf is not None:
    -            feats0 = feats.clone()
-        if (
-            index is not None
-            and big_npy is not None
-            and index_rate != 0
-        ):
    -            npy = feats[0].cpu().numpy()
    -            if self.is_half:
    -                npy = npy.astype("float32")
    -
    -            # _, I = index.search(npy, 1)
    -            # npy = big_npy[I.squeeze()]
    -
    -            score, ix = index.search(npy, k=8)
    -            weight = np.square(1 / score)
    -            weight /= weight.sum(axis=1, keepdims=True)
    -            npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
    -
    -            if self.is_half:
    -                npy = npy.astype("float16")
    -            feats = (
    -                torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
    -                + (1 - index_rate) * feats
    -            )
    -
    -        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-        if protect < 0.5 and pitch is not None and pitchf is not None:
    -            feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
    -                0, 2, 1
    -            )
    -        t1 = ttime()
    -        p_len = audio0.shape[0] // self.window
    -        if feats.shape[1] < p_len:
    -            p_len = feats.shape[1]
-            if pitch is not None and pitchf is not None:
    -                pitch = pitch[:, :p_len]
    -                pitchf = pitchf[:, :p_len]
    -
-        if protect < 0.5 and pitch is not None and pitchf is not None:
    -            pitchff = pitchf.clone()
    -            pitchff[pitchf > 0] = 1
    -            pitchff[pitchf < 1] = protect
    -            pitchff = pitchff.unsqueeze(-1)
    -            feats = feats * pitchff + feats0 * (1 - pitchff)
    -            feats = feats.to(feats0.dtype)
    -        p_len = torch.tensor([p_len], device=self.device).long()
    -        with torch.no_grad():
-            if pitch is not None and pitchf is not None:
    -                audio1 = (
    -                    (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
    -                    .data.cpu()
    -                    .float()
    -                    .numpy()
    -                )
    -            else:
    -                audio1 = (
    -                    (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
    -                )
    -        del feats, p_len, padding_mask
    -        if torch.cuda.is_available():
    -            torch.cuda.empty_cache()
    -        t2 = ttime()
    -        times[0] += t1 - t0
    -        times[2] += t2 - t1
    -        return audio1
    -
    -    def pipeline(
    -        self,
    -        model,
    -        net_g,
    -        sid,
    -        audio,
    -        input_audio_path,
    -        times,
    -        f0_up_key,
    -        f0_method,
    -        file_index,
    -        # file_big_npy,
    -        index_rate,
    -        if_f0,
    -        filter_radius,
    -        tgt_sr,
    -        resample_sr,
    -        rms_mix_rate,
    -        version,
    -        protect,
    -        f0_file=None,
    -    ):
-        if (
-            file_index != ""
-            # and file_big_npy != ""
-            # and os.path.exists(file_big_npy)
-            and os.path.exists(file_index)
-            and index_rate != 0
-        ):
    -            try:
    -                index = faiss.read_index(file_index)
    -                # big_npy = np.load(file_big_npy)
    -                big_npy = index.reconstruct_n(0, index.ntotal)
-            except Exception:
    -                traceback.print_exc()
    -                index = big_npy = None
    -        else:
    -            index = big_npy = None
    -        audio = signal.filtfilt(bh, ah, audio)
    -        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
    -        opt_ts = []
    -        if audio_pad.shape[0] > self.t_max:
    -            audio_sum = np.zeros_like(audio)
    -            for i in range(self.window):
    -                audio_sum += audio_pad[i : i - self.window]
    -            for t in range(self.t_center, audio.shape[0], self.t_center):
    -                opt_ts.append(
    -                    t
    -                    - self.t_query
    -                    + np.where(
    -                        np.abs(audio_sum[t - self.t_query : t + self.t_query])
    -                        == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
    -                    )[0][0]
    -                )
    -        s = 0
    -        audio_opt = []
    -        t = None
    -        t1 = ttime()
    -        audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
    -        p_len = audio_pad.shape[0] // self.window
    -        inp_f0 = None
    -        if hasattr(f0_file, "name") == True:
    -            try:
    -                with open(f0_file.name, "r") as f:
    -                    lines = f.read().strip("\n").split("\n")
    -                inp_f0 = []
    -                for line in lines:
    -                    inp_f0.append([float(i) for i in line.split(",")])
    -                inp_f0 = np.array(inp_f0, dtype="float32")
-            except Exception:
    -                traceback.print_exc()
    -        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
    -        pitch, pitchf = None, None
    -        if if_f0 == 1:
    -            pitch, pitchf = self.get_f0(
    -                input_audio_path,
    -                audio_pad,
    -                p_len,
    -                f0_up_key,
    -                f0_method,
    -                filter_radius,
    -                inp_f0,
    -            )
    -            pitch = pitch[:p_len]
    -            pitchf = pitchf[:p_len]
    -            if self.device == "mps":
    -                pitchf = pitchf.astype(np.float32)
    -            pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
    -            pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
    -        t2 = ttime()
    -        times[1] += t2 - t1
    -        for t in opt_ts:
    -            t = t // self.window * self.window
    -            if if_f0 == 1:
    -                audio_opt.append(
    -                    self.vc(
    -                        model,
    -                        net_g,
    -                        sid,
    -                        audio_pad[s : t + self.t_pad2 + self.window],
    -                        pitch[:, s // self.window : (t + self.t_pad2) // self.window],
    -                        pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
    -                        times,
    -                        index,
    -                        big_npy,
    -                        index_rate,
    -                        version,
    -                        protect,
    -                    )[self.t_pad_tgt : -self.t_pad_tgt]
    -                )
    -            else:
    -                audio_opt.append(
    -                    self.vc(
    -                        model,
    -                        net_g,
    -                        sid,
    -                        audio_pad[s : t + self.t_pad2 + self.window],
    -                        None,
    -                        None,
    -                        times,
    -                        index,
    -                        big_npy,
    -                        index_rate,
    -                        version,
    -                        protect,
    -                    )[self.t_pad_tgt : -self.t_pad_tgt]
    -                )
    -            s = t
    -        if if_f0 == 1:
    -            audio_opt.append(
    -                self.vc(
    -                    model,
    -                    net_g,
    -                    sid,
    -                    audio_pad[t:],
    -                    pitch[:, t // self.window :] if t is not None else pitch,
    -                    pitchf[:, t // self.window :] if t is not None else pitchf,
    -                    times,
    -                    index,
    -                    big_npy,
    -                    index_rate,
    -                    version,
    -                    protect,
    -                )[self.t_pad_tgt : -self.t_pad_tgt]
    -            )
    -        else:
    -            audio_opt.append(
    -                self.vc(
    -                    model,
    -                    net_g,
    -                    sid,
    -                    audio_pad[t:],
    -                    None,
    -                    None,
    -                    times,
    -                    index,
    -                    big_npy,
    -                    index_rate,
    -                    version,
    -                    protect,
    -                )[self.t_pad_tgt : -self.t_pad_tgt]
    -            )
    -        audio_opt = np.concatenate(audio_opt)
    -        if rms_mix_rate != 1:
    -            audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
    -        if resample_sr >= 16000 and tgt_sr != resample_sr:
    -            audio_opt = librosa.resample(
    -                audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
    -            )
    -        audio_max = np.abs(audio_opt).max() / 0.99
    -        max_int16 = 32768
    -        if audio_max > 1:
    -            max_int16 /= audio_max
    -        audio_opt = (audio_opt * max_int16).astype(np.int16)
    -        del pitch, pitchf, sid
    -        if torch.cuda.is_available():
    -            torch.cuda.empty_cache()
    -        return audio_opt
    \ No newline at end of file
    diff --git a/spaces/Kevin676/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md b/spaces/Kevin676/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md
    deleted file mode 100644
    index a4f28a3d27d66d79cb95f2b8b847832172bb5f11..0000000000000000000000000000000000000000
    --- a/spaces/Kevin676/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md
    +++ /dev/null
    @@ -1,40 +0,0 @@
    -
    -
    -
    -
    -### Background
    -
    -
    -### Changes
    -
    -
    -### Documentation
    -
    -
    -### Test Plan
    -
    -
    -### PR Quality Checklist
    -- [ ] My pull request is atomic and focuses on a single change.
    -- [ ] I have thoroughly tested my changes with multiple different prompts.
    -- [ ] I have considered potential risks and mitigations for my changes.
    -- [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks or unrelated changes 
    -
    -
    -
    -
    diff --git a/spaces/Kevin676/AutoGPT/tests/integration/milvus_memory_tests.py b/spaces/Kevin676/AutoGPT/tests/integration/milvus_memory_tests.py
    deleted file mode 100644
    index ec38bf2f72087b5da679d26594ebff97d8a09b19..0000000000000000000000000000000000000000
    --- a/spaces/Kevin676/AutoGPT/tests/integration/milvus_memory_tests.py
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -# sourcery skip: snake-case-functions
    -"""Tests for the MilvusMemory class."""
    -import random
    -import string
    -import unittest
    -
    -from autogpt.config import Config
    -from autogpt.memory.milvus import MilvusMemory
    -
    -try:
    -
    -    class TestMilvusMemory(unittest.TestCase):
    -        """Tests for the MilvusMemory class."""
    -
    -        def random_string(self, length: int) -> str:
    -            """Generate a random string of the given length."""
    -            return "".join(random.choice(string.ascii_letters) for _ in range(length))
    -
    -        def setUp(self) -> None:
    -            """Set up the test environment."""
    -            cfg = Config()
    -            cfg.milvus_addr = "localhost:19530"
    -            self.memory = MilvusMemory(cfg)
    -            self.memory.clear()
    -
    -            # Add example texts to the cache
    -            self.example_texts = [
    -                "The quick brown fox jumps over the lazy dog",
    -                "I love machine learning and natural language processing",
    -                "The cake is a lie, but the pie is always true",
    -                "ChatGPT is an advanced AI model for conversation",
    -            ]
    -
    -            for text in self.example_texts:
    -                self.memory.add(text)
    -
    -            # Add some random strings to test noise
    -            for _ in range(5):
    -                self.memory.add(self.random_string(10))
    -
    -        def test_get_relevant(self) -> None:
    -            """Test getting relevant texts from the cache."""
    -            query = "I'm interested in artificial intelligence and NLP"
    -            num_relevant = 3
    -            relevant_texts = self.memory.get_relevant(query, num_relevant)
    -
    -            print(f"Top {k} relevant texts for the query '{query}':")
    -            for i, text in enumerate(relevant_texts, start=1):
    -                print(f"{i}. {text}")
    -
-            self.assertEqual(len(relevant_texts), num_relevant)
    -            self.assertIn(self.example_texts[1], relevant_texts)
    -
-except Exception:
    -    print(
    -        "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
    -    )
    diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/stft_loss.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/stft_loss.py
    deleted file mode 100644
    index e47447455341e5725d6f82ded66dc08b5d2b1cc5..0000000000000000000000000000000000000000
    --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/stft_loss.py
    +++ /dev/null
    @@ -1,136 +0,0 @@
    -# -*- coding: utf-8 -*-
    -
    -# Copyright 2019 Tomoki Hayashi
    -#  MIT License (https://opensource.org/licenses/MIT)
    -
    -"""STFT-based Loss modules."""
    -
    -import torch
    -import torch.nn.functional as F
    -
    -
    -def stft(x, fft_size, hop_size, win_length, window):
    -    """Perform STFT and convert to magnitude spectrogram.
    -    Args:
    -        x (Tensor): Input signal tensor (B, T).
    -        fft_size (int): FFT size.
    -        hop_size (int): Hop size.
    -        win_length (int): Window length.
    -        window (str): Window function type.
    -    Returns:
    -        Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
    -    """
-    x_stft = torch.stft(x, fft_size, hop_size, win_length, window, return_complex=False)  # newer torch requires return_complex
    -    real = x_stft[..., 0]
    -    imag = x_stft[..., 1]
    -
    -    # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
    -    return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
    -
    -
-class SpectralConvergenceLoss(torch.nn.Module):
-    """Spectral convergence loss module."""
-
-    def __init__(self):
-        """Initialize spectral convergence loss module."""
-        super(SpectralConvergenceLoss, self).__init__()
    -
    -    def forward(self, x_mag, y_mag):
    -        """Calculate forward propagation.
    -        Args:
    -            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
    -            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
    -        Returns:
    -            Tensor: Spectral convergence loss value.
    -        """
    -        return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
    -
    -
    -class LogSTFTMagnitudeLoss(torch.nn.Module):
    -    """Log STFT magnitude loss module."""
    -
    -    def __init__(self):
    -        """Initilize los STFT magnitude loss module."""
    -        super(LogSTFTMagnitudeLoss, self).__init__()
    -
    -    def forward(self, x_mag, y_mag):
    -        """Calculate forward propagation.
    -        Args:
    -            x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
    -            y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
    -        Returns:
    -            Tensor: Log STFT magnitude loss value.
    -        """
    -        return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
    -
    -
    -class STFTLoss(torch.nn.Module):
    -    """STFT loss module."""
    -
    -    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"):
    -        """Initialize STFT loss module."""
    -        super(STFTLoss, self).__init__()
    -        self.fft_size = fft_size
    -        self.shift_size = shift_size
    -        self.win_length = win_length
    -        self.window = getattr(torch, window)(win_length)
-        self.spectral_convergence_loss = SpectralConvergenceLoss()
    -        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
    -
    -    def forward(self, x, y):
    -        """Calculate forward propagation.
    -        Args:
    -            x (Tensor): Predicted signal (B, T).
    -            y (Tensor): Groundtruth signal (B, T).
    -        Returns:
    -            Tensor: Spectral convergence loss value.
    -            Tensor: Log STFT magnitude loss value.
    -        """
-        x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window.to(x.device))
-        y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(y.device))
-        sc_loss = self.spectral_convergence_loss(x_mag, y_mag)
    -        mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
    -
    -        return sc_loss, mag_loss
    -
    -
    -class MultiResolutionSTFTLoss(torch.nn.Module):
    -    """Multi resolution STFT loss module."""
    -
    -    def __init__(self,
    -                 fft_sizes=[1024, 2048, 512],
    -                 hop_sizes=[120, 240, 50],
    -                 win_lengths=[600, 1200, 240],
    -                 window="hann_window"):
    -        """Initialize Multi resolution STFT loss module.
    -        Args:
    -            fft_sizes (list): List of FFT sizes.
    -            hop_sizes (list): List of hop sizes.
    -            win_lengths (list): List of window lengths.
    -            window (str): Window function type.
    -        """
    -        super(MultiResolutionSTFTLoss, self).__init__()
    -        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
    -        self.stft_losses = torch.nn.ModuleList()
    -        for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
-            self.stft_losses.append(STFTLoss(fs, ss, wl, window))
    -
    -    def forward(self, x, y):
    -        """Calculate forward propagation.
    -        Args:
    -            x (Tensor): Predicted signal (B, T).
    -            y (Tensor): Groundtruth signal (B, T).
    -        Returns:
    -            Tensor: Multi resolution spectral convergence loss value.
    -            Tensor: Multi resolution log STFT magnitude loss value.
    -        """
    -        sc_loss = 0.0
    -        mag_loss = 0.0
    -        for f in self.stft_losses:
    -            sc_l, mag_l = f(x, y)
    -            sc_loss += sc_l
    -            mag_loss += mag_l
    -        sc_loss /= len(self.stft_losses)
    -        mag_loss /= len(self.stft_losses)
    -
    -        return sc_loss, mag_loss
    \ No newline at end of file
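A minimal usage sketch of the loss above, assuming the file is importable as stft_loss (a hypothetical path) and a PyTorch version whose torch.stft accepts return_complex=False:

    import torch
    from stft_loss import MultiResolutionSTFTLoss  # hypothetical import path

    criterion = MultiResolutionSTFTLoss()
    y_hat = torch.randn(4, 16000)  # predicted waveform (B, T)
    y = torch.randn(4, 16000)      # reference waveform (B, T)
    sc_loss, mag_loss = criterion(y_hat, y)
    loss = sc_loss + mag_loss      # the two terms are typically summed
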
    diff --git a/spaces/Kushiii112/stabilityai-stable-diffusion-xl-base-1.0/app.py b/spaces/Kushiii112/stabilityai-stable-diffusion-xl-base-1.0/app.py
    deleted file mode 100644
    index 9520517f687cf7229ddfab9d8c5f8af7f76b0bd4..0000000000000000000000000000000000000000
    --- a/spaces/Kushiii112/stabilityai-stable-diffusion-xl-base-1.0/app.py
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -import gradio as gr
    -
    -gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
    \ No newline at end of file
    diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/kst.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/kst.py
    deleted file mode 100644
    index 9f358f7b69aff3066918523580d14002385a75bd..0000000000000000000000000000000000000000
    --- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/kst.py
    +++ /dev/null
    @@ -1,77 +0,0 @@
    -#!/usr/bin/env python
    -# -*- coding: utf-8; py-indent-offset:4 -*-
    -###############################################################################
    -#
    -# Copyright (C) 2015-2020 Daniel Rodriguez
    -#
    -# This program is free software: you can redistribute it and/or modify
    -# it under the terms of the GNU General Public License as published by
    -# the Free Software Foundation, either version 3 of the License, or
    -# (at your option) any later version.
    -#
    -# This program is distributed in the hope that it will be useful,
    -# but WITHOUT ANY WARRANTY; without even the implied warranty of
    -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    -# GNU General Public License for more details.
    -#
    -# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
    -#
    -###############################################################################
    -from __future__ import (absolute_import, division, print_function,
    -                        unicode_literals)
    -
    -import backtrader as bt
    -from . import SMA, ROC100
    -
    -
    -class KnowSureThing(bt.Indicator):
    -    '''
    -    It is a "summed" momentum indicator. Developed by Martin Pring and
    -    published in 1992 in Stocks & Commodities.
    -
    -    Formula:
    -      - rcma1 = MovAv(roc100(rp1), period)
    -      - rcma2 = MovAv(roc100(rp2), period)
    -      - rcma3 = MovAv(roc100(rp3), period)
    -      - rcma4 = MovAv(roc100(rp4), period)
    -
    -      - kst = 1.0 * rcma1 + 2.0 * rcma2 + 3.0 * rcma3 + 4.0 * rcma4
    -      - signal = MovAv(kst, speriod)
    -
    -    See:
    -      - http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
    -
    -    Params
    -
-      - ``rma1``, ``rma2``, ``rma3``, ``rma4``: periods for the MovingAverages on the ROCs
-      - ``rp1``, ``rp2``, ``rp3``, ``rp4``: periods for the ROCs
-      - ``rsignal``: period for the MovingAverage of the signal line
-      - ``rfactors``: list of factors to apply to the different MovAv(ROCs)
-      - ``_rmovav`` and ``_smovav``: allow changing the Moving Average types
-        used to calculate kst and signal, respectively
    -
    -    '''
    -    alias = ('KST',)
    -    lines = ('kst', 'signal',)
    -    params = (
    -        ('rp1', 10), ('rp2', 15), ('rp3', 20), ('rp4', 30),
    -        ('rma1', 10), ('rma2', 10), ('rma3', 10), ('rma4', 10),
    -        ('rsignal', 9),
    -        ('rfactors', [1.0, 2.0, 3.0, 4.0]),
    -        ('_rmovav', SMA),
    -        ('_smovav', SMA),
    -    )
    -
    -    plotinfo = dict(plothlines=[0.0])
    -
    -    def __init__(self):
    -        rcma1 = self.p._rmovav(ROC100(period=self.p.rp1), period=self.p.rma1)
    -        rcma2 = self.p._rmovav(ROC100(period=self.p.rp2), period=self.p.rma2)
    -        rcma3 = self.p._rmovav(ROC100(period=self.p.rp3), period=self.p.rma3)
    -        rcma4 = self.p._rmovav(ROC100(period=self.p.rp4), period=self.p.rma4)
    -        self.l.kst = sum([rfi * rci for rfi, rci in
    -                          zip(self.p.rfactors, [rcma1, rcma2, rcma3, rcma4])])
    -
    -        self.l.signal = self.p._smovav(self.l.kst, period=self.p.rsignal)
    -        super(KnowSureThing, self).__init__()
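The docstring's formula in a standalone pandas sketch, assuming SMA moving averages and roc100(p) = 100 * (close / close.shift(p) - 1), which matches the defaults above:

    import pandas as pd

    def kst(close: pd.Series,
            rp=(10, 15, 20, 30), rma=(10, 10, 10, 10),
            rfactors=(1.0, 2.0, 3.0, 4.0), rsignal=9):
        # one smoothed rate-of-change line per (rp, rma) pair
        rcmas = [(100.0 * (close / close.shift(p) - 1.0)).rolling(m).mean()
                 for p, m in zip(rp, rma)]
        kst_line = sum(f * r for f, r in zip(rfactors, rcmas))
        signal = kst_line.rolling(rsignal).mean()
        return kst_line, signal
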
    diff --git a/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/activations.py b/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/activations.py
    deleted file mode 100644
    index 8bd6f2917a56d72db56555d0ff54b2311bc21778..0000000000000000000000000000000000000000
    --- a/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/activations.py
    +++ /dev/null
    @@ -1,96 +0,0 @@
    -# Copyright (c) Meta Platforms, Inc. and affiliates.
    -# All rights reserved.
    -#
    -# This source code is licensed under the license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -import torch
    -import torch.nn as nn
    -from torch import Tensor
    -from typing import Union, Callable
    -
    -
    -class CustomGLU(nn.Module):
    -    """Custom Gated Linear Unit activation.
    -    Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half
    -    of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation
-    function (e.g. sigmoid or swish).
    -
    -    Args:
    -        activation (nn.Module): The custom activation to apply in the Gated Linear Unit
    -        dim (int): the dimension on which to split the input. Default: -1
    -
    -    Shape:
    -        - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
    -          dimensions
    -        - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
    -
    -    Examples::
    -        >>> m = CustomGLU(nn.Sigmoid())
    -        >>> input = torch.randn(4, 2)
    -        >>> output = m(input)
    -    """
    -    def __init__(self, activation: nn.Module, dim: int = -1):
    -        super(CustomGLU, self).__init__()
    -        self.dim = dim
    -        self.activation = activation
    -
    -    def forward(self, x: Tensor):
    -        assert x.shape[self.dim] % 2 == 0  # M = N / 2
    -        a, b = torch.chunk(x, 2, dim=self.dim)
    -        return a * self.activation(b)
    -
    -
    -class SwiGLU(CustomGLU):
    -    """SiLU Gated Linear Unit activation.
    -    Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is
    -    the first half of the input matrices, :math:`b` is the second half.
    -
    -    Args:
    -        dim (int): the dimension on which to split the input. Default: -1
    -    """
    -    def __init__(self, dim: int = -1):
    -        super(SwiGLU, self).__init__(nn.SiLU(), dim)
    -
    -
    -class GeGLU(CustomGLU):
    -    """GeLU Gated Linear Unit activation.
    -    Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is
    -    the first half of the input matrices, :math:`b` is the second half.
    -
    -    Args:
    -        dim (int): the dimension on which to split the input. Default: -1
    -    """
    -    def __init__(self, dim: int = -1):
    -        super(GeGLU, self).__init__(nn.GELU(), dim)
    -
    -
    -class ReGLU(CustomGLU):
    -    """ReLU Gated Linear Unit activation.
    -    Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is
    -    the first half of the input matrices, :math:`b` is the second half.
    -
    -    Args:
    -        dim (int): the dimension on which to split the input. Default: -1
    -    """
    -    def __init__(self, dim: int = -1):
    -        super(ReGLU, self).__init__(nn.ReLU(), dim)
    -
    -
    -def get_activation_fn(
    -    activation: Union[str, Callable[[Tensor], Tensor]]
    -) -> Union[str, Callable[[Tensor], Tensor]]:
    -    """Helper function to map an activation string to the activation class.
    -    If the supplied activation is not a string that is recognized, the activation is passed back.
    -
    -    Args:
    -        activation (Union[str, Callable[[Tensor], Tensor]]): Activation to check
    -    """
    -    if isinstance(activation, str):
    -        if activation == "reglu":
    -            return ReGLU()
    -        elif activation == "geglu":
    -            return GeGLU()
    -        elif activation == "swiglu":
    -            return SwiGLU()
    -    return activation
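A quick shape check, assuming the module above is importable from audiocraft.modules.activations: the gate halves the split dimension.

    import torch
    from audiocraft.modules.activations import SwiGLU, get_activation_fn

    m = get_activation_fn("swiglu")  # returns a SwiGLU() instance
    x = torch.randn(4, 8)
    y = m(x)
    assert y.shape == (4, 4)         # a * SiLU(b) with a, b = chunk(x, 2)
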
    diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/common.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/common.py
    deleted file mode 100644
    index 2bf15236a3eb24d8526073bc4fa2b274cccb3f96..0000000000000000000000000000000000000000
    --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/modeling/common.py
    +++ /dev/null
    @@ -1,43 +0,0 @@
    -# Copyright (c) Meta Platforms, Inc. and affiliates.
    -# All rights reserved.
    -
    -# This source code is licensed under the license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -import torch
    -import torch.nn as nn
    -
    -from typing import Type
    -
    -
    -class MLPBlock(nn.Module):
    -    def __init__(
    -        self,
    -        embedding_dim: int,
    -        mlp_dim: int,
    -        act: Type[nn.Module] = nn.GELU,
    -    ) -> None:
    -        super().__init__()
    -        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
    -        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
    -        self.act = act()
    -
    -    def forward(self, x: torch.Tensor) -> torch.Tensor:
    -        return self.lin2(self.act(self.lin1(x)))
    -
    -
    -# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
    -# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119  # noqa
    -class LayerNorm2d(nn.Module):
    -    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
    -        super().__init__()
    -        self.weight = nn.Parameter(torch.ones(num_channels))
    -        self.bias = nn.Parameter(torch.zeros(num_channels))
    -        self.eps = eps
    -
    -    def forward(self, x: torch.Tensor) -> torch.Tensor:
    -        u = x.mean(1, keepdim=True)
    -        s = (x - u).pow(2).mean(1, keepdim=True)
    -        x = (x - u) / torch.sqrt(s + self.eps)
    -        x = self.weight[:, None, None] * x + self.bias[:, None, None]
    -        return x
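A sanity sketch, assuming LayerNorm2d above is in scope: it normalizes over the channel dimension of an NCHW tensor at every spatial position, which plain nn.LayerNorm only does after permuting to channels-last.

    import torch

    ln = LayerNorm2d(8)
    x = torch.randn(2, 8, 16, 16)
    y = ln(x)
    # with the default affine parameters, each (n, :, h, w) slice is
    # normalized to roughly zero mean and unit variance
    print(y.mean(dim=1).abs().max().item())
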
    diff --git a/spaces/Marshalls/testmtd/misc/copy_chpt_from_jeanzay.sh b/spaces/Marshalls/testmtd/misc/copy_chpt_from_jeanzay.sh
    deleted file mode 100644
    index 6d781d4efbcd0ffb73ed85db61fd7a661d9c3ec0..0000000000000000000000000000000000000000
    --- a/spaces/Marshalls/testmtd/misc/copy_chpt_from_jeanzay.sh
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -#!/bin/bash
    -exp=$1
    -version=$2
-mkdir -p training/experiments/${exp}
-mkdir -p training/experiments/${exp}/version_${version}
    -scp -r jeanzay:/gpfswork/rech/imi/usc19dv/mt-lightning/training/experiments/${exp}/version_${version}/* training/experiments/${exp}/version_${version}
    diff --git a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/__init__.py b/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/__init__.py
    deleted file mode 100644
    index 7836cada81f90ded99c58d5942eea4c3477f58fc..0000000000000000000000000000000000000000
    --- a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/__init__.py
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
    -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
    diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/maskrcnn/mask-rcnn_resnet50_fpn_160e_ctw1500.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/maskrcnn/mask-rcnn_resnet50_fpn_160e_ctw1500.py
    deleted file mode 100644
    index 547a4212e23e7f3ee188960a7c4858d3bba0d414..0000000000000000000000000000000000000000
    --- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/maskrcnn/mask-rcnn_resnet50_fpn_160e_ctw1500.py
    +++ /dev/null
    @@ -1,56 +0,0 @@
    -_base_ = [
    -    '_base_mask-rcnn_resnet50_fpn.py',
    -    '../_base_/datasets/ctw1500.py',
    -    '../_base_/default_runtime.py',
    -    '../_base_/schedules/schedule_sgd_base.py',
    -]
    -
    -# optimizer
    -optim_wrapper = dict(optimizer=dict(lr=0.08))
    -train_cfg = dict(max_epochs=160)
    -# learning policy
    -param_scheduler = [
    -    dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),
    -    dict(type='MultiStepLR', milestones=[80, 128], end=160),
    -]
    -
    -# dataset settings
    -ctw1500_textdet_train = _base_.ctw1500_textdet_train
    -ctw1500_textdet_test = _base_.ctw1500_textdet_test
    -
    -# test pipeline for CTW1500
    -ctw_test_pipeline = [
    -    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    -    dict(type='Resize', scale=(1600, 1600), keep_ratio=True),
-    # load annotations after ``Resize`` because the ground-truth
-    # polygons do not need the resize transform
    -    dict(
    -        type='LoadOCRAnnotations',
    -        with_polygon=True,
    -        with_bbox=True,
    -        with_label=True),
    -    dict(
    -        type='PackTextDetInputs',
    -        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
    -]
    -
    -ctw1500_textdet_train.pipeline = _base_.train_pipeline
    -ctw1500_textdet_test.pipeline = ctw_test_pipeline
    -
    -train_dataloader = dict(
    -    batch_size=8,
    -    num_workers=4,
    -    persistent_workers=True,
    -    sampler=dict(type='DefaultSampler', shuffle=True),
    -    dataset=ctw1500_textdet_train)
    -
    -val_dataloader = dict(
    -    batch_size=1,
    -    num_workers=1,
    -    persistent_workers=True,
    -    sampler=dict(type='DefaultSampler', shuffle=False),
    -    dataset=ctw1500_textdet_test)
    -
    -test_dataloader = val_dataloader
    -
    -auto_scale_lr = dict(base_batch_size=8)
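A hedged sketch of how a config like this is usually materialized with mmengine; the file path below is an assumption based on the repository layout:

    from mmengine.config import Config

    cfg = Config.fromfile(
        'configs/textdet/maskrcnn/mask-rcnn_resnet50_fpn_160e_ctw1500.py')
    print(cfg.train_cfg.max_epochs)        # 160
    print(cfg.optim_wrapper.optimizer.lr)  # 0.08
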
    diff --git a/spaces/MrSinan/Reconstruction/aux_functions.py b/spaces/MrSinan/Reconstruction/aux_functions.py
    deleted file mode 100644
    index c8eb966960bfa6979dcea0b3df8d79bd7ca0e08e..0000000000000000000000000000000000000000
    --- a/spaces/MrSinan/Reconstruction/aux_functions.py
    +++ /dev/null
    @@ -1,684 +0,0 @@
    -# Author: aqeelanwar
    -# Created: 27 April,2020, 10:21 PM
    -# Email: aqeel.anwar@gatech.edu
    -
    -from configparser import ConfigParser
    -import cv2, math, os
    -from PIL import Image, ImageDraw
    -from tqdm import tqdm
    -from read_cfg import read_cfg
    -from fit_ellipse import *
    -import random
    -from create_mask import texture_the_mask, color_the_mask
    -from imutils import face_utils
    -import requests
    -from zipfile import ZipFile
    -from tqdm import tqdm
    -import bz2, shutil
    -import numpy as np
    -
    -
    -def download_dlib_model():
    -    print_orderly("Get dlib model", 60)
    -    dlib_model_link = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
    -    print("Downloading dlib model...")
    -    with requests.get(dlib_model_link, stream=True) as r:
    -        print("Zip file size: ", np.round(len(r.content) / 1024 / 1024, 2), "MB")
    -        destination = (
    -            "dlib_models" + os.path.sep + "shape_predictor_68_face_landmarks.dat.bz2"
    -        )
    -        if not os.path.exists(destination.rsplit(os.path.sep, 1)[0]):
    -            os.mkdir(destination.rsplit(os.path.sep, 1)[0])
    -        print("Saving dlib model...")
    -        with open(destination, "wb") as fd:
-            for chunk in r.iter_content(chunk_size=32768):
    -                fd.write(chunk)
    -    print("Extracting dlib model...")
    -    with bz2.BZ2File(destination) as fr, open(
    -        "dlib_models/shape_predictor_68_face_landmarks.dat", "wb"
    -    ) as fw:
    -        shutil.copyfileobj(fr, fw)
    -    print("Saved: ", destination)
    -    print_orderly("done", 60)
    -
    -    os.remove(destination)
    -
    -
    -def get_line(face_landmark, image, type="eye", debug=False):
    -    pil_image = Image.fromarray(image)
    -    d = ImageDraw.Draw(pil_image)
    -    left_eye = face_landmark["left_eye"]
    -    right_eye = face_landmark["right_eye"]
    -    left_eye_mid = np.mean(np.array(left_eye), axis=0)
    -    right_eye_mid = np.mean(np.array(right_eye), axis=0)
    -    eye_line_mid = (left_eye_mid + right_eye_mid) / 2
    -
    -    if type == "eye":
    -        left_point = left_eye_mid
    -        right_point = right_eye_mid
    -        mid_point = eye_line_mid
    -
    -    elif type == "nose_mid":
    -        nose_length = (
    -            face_landmark["nose_bridge"][-1][1] - face_landmark["nose_bridge"][0][1]
    -        )
    -        left_point = [left_eye_mid[0], left_eye_mid[1] + nose_length / 2]
    -        right_point = [right_eye_mid[0], right_eye_mid[1] + nose_length / 2]
    -        # mid_point = (
    -        #     face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
    -        # ) / 2
    -
    -        mid_pointY = (
    -            face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
    -        ) / 2
    -        mid_pointX = (
    -            face_landmark["nose_bridge"][-1][0] + face_landmark["nose_bridge"][0][0]
    -        ) / 2
    -        mid_point = (mid_pointX, mid_pointY)
    -
    -    elif type == "nose_tip":
    -        nose_length = (
    -            face_landmark["nose_bridge"][-1][1] - face_landmark["nose_bridge"][0][1]
    -        )
    -        left_point = [left_eye_mid[0], left_eye_mid[1] + nose_length]
    -        right_point = [right_eye_mid[0], right_eye_mid[1] + nose_length]
    -        mid_point = (
    -            face_landmark["nose_bridge"][-1][1] + face_landmark["nose_bridge"][0][1]
    -        ) / 2
    -
    -    elif type == "bottom_lip":
    -        bottom_lip = face_landmark["bottom_lip"]
    -        bottom_lip_mid = np.max(np.array(bottom_lip), axis=0)
    -        shiftY = bottom_lip_mid[1] - eye_line_mid[1]
    -        left_point = [left_eye_mid[0], left_eye_mid[1] + shiftY]
    -        right_point = [right_eye_mid[0], right_eye_mid[1] + shiftY]
    -        mid_point = bottom_lip_mid
    -
    -    elif type == "perp_line":
    -        bottom_lip = face_landmark["bottom_lip"]
    -        bottom_lip_mid = np.mean(np.array(bottom_lip), axis=0)
    -
    -        left_point = eye_line_mid
    -        left_point = face_landmark["nose_bridge"][0]
    -        right_point = bottom_lip_mid
    -
    -        mid_point = bottom_lip_mid
    -
    -    elif type == "nose_long":
    -        nose_bridge = face_landmark["nose_bridge"]
    -        left_point = [nose_bridge[0][0], nose_bridge[0][1]]
    -        right_point = [nose_bridge[-1][0], nose_bridge[-1][1]]
    -
    -        mid_point = left_point
    -
    -    # d.line(eye_mid, width=5, fill='red')
    -    y = [left_point[1], right_point[1]]
    -    x = [left_point[0], right_point[0]]
    -    # cv2.imshow('h', image)
    -    # cv2.waitKey(0)
    -    eye_line = fit_line(x, y, image)
    -    d.line(eye_line, width=5, fill="blue")
    -
    -    # Perpendicular Line
    -    # (midX, midY) and (midX - y2 + y1, midY + x2 - x1)
    -    y = [
    -        (left_point[1] + right_point[1]) / 2,
    -        (left_point[1] + right_point[1]) / 2 + right_point[0] - left_point[0],
    -    ]
    -    x = [
    -        (left_point[0] + right_point[0]) / 2,
    -        (left_point[0] + right_point[0]) / 2 - right_point[1] + left_point[1],
    -    ]
    -    perp_line = fit_line(x, y, image)
    -    if debug:
    -        d.line(perp_line, width=5, fill="red")
    -        pil_image.show()
    -    return eye_line, perp_line, left_point, right_point, mid_point
    -
    -
    -def get_points_on_chin(line, face_landmark, chin_type="chin"):
    -    chin = face_landmark[chin_type]
    -    points_on_chin = []
    -    for i in range(len(chin) - 1):
    -        chin_first_point = [chin[i][0], chin[i][1]]
    -        chin_second_point = [chin[i + 1][0], chin[i + 1][1]]
    -
    -        flag, x, y = line_intersection(line, (chin_first_point, chin_second_point))
    -        if flag:
    -            points_on_chin.append((x, y))
    -
    -    return points_on_chin
    -
    -
    -def plot_lines(face_line, image, debug=False):
    -    pil_image = Image.fromarray(image)
    -    if debug:
    -        d = ImageDraw.Draw(pil_image)
    -        d.line(face_line, width=4, fill="white")
    -        pil_image.show()
    -
    -
    -def line_intersection(line1, line2):
    -    # mid = int(len(line1) / 2)
    -    start = 0
    -    end = -1
    -    line1 = ([line1[start][0], line1[start][1]], [line1[end][0], line1[end][1]])
    -
    -    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    -    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
    -    x = []
    -    y = []
    -    flag = False
    -
    -    def det(a, b):
    -        return a[0] * b[1] - a[1] * b[0]
    -
    -    div = det(xdiff, ydiff)
    -    if div == 0:
    -        return flag, x, y
    -
    -    d = (det(*line1), det(*line2))
    -    x = det(d, xdiff) / div
    -    y = det(d, ydiff) / div
    -
    -    segment_minX = min(line2[0][0], line2[1][0])
    -    segment_maxX = max(line2[0][0], line2[1][0])
    -
    -    segment_minY = min(line2[0][1], line2[1][1])
    -    segment_maxY = max(line2[0][1], line2[1][1])
    -
    -    if (
    -        segment_maxX + 1 >= x >= segment_minX - 1
    -        and segment_maxY + 1 >= y >= segment_minY - 1
    -    ):
    -        flag = True
    -
    -    return flag, x, y
    -
    -
    -def fit_line(x, y, image):
    -    if x[0] == x[1]:
    -        x[0] += 0.1
    -    coefficients = np.polyfit(x, y, 1)
    -    polynomial = np.poly1d(coefficients)
    -    x_axis = np.linspace(0, image.shape[1], 50)
    -    y_axis = polynomial(x_axis)
    -    eye_line = []
    -    for i in range(len(x_axis)):
    -        eye_line.append((x_axis[i], y_axis[i]))
    -
    -    return eye_line
    -
    -
    -def get_six_points(face_landmark, image):
    -    _, perp_line1, _, _, m = get_line(face_landmark, image, type="nose_mid")
    -    face_b = m
    -
    -    perp_line, _, _, _, _ = get_line(face_landmark, image, type="perp_line")
    -    points1 = get_points_on_chin(perp_line1, face_landmark)
    -    points = get_points_on_chin(perp_line, face_landmark)
    -    if not points1:
    -        face_e = tuple(np.asarray(points[0]))
    -    elif not points:
    -        face_e = tuple(np.asarray(points1[0]))
    -    else:
    -        face_e = tuple((np.asarray(points[0]) + np.asarray(points1[0])) / 2)
    -    # face_e = points1[0]
    -    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_long")
    -
    -    angle = get_angle(perp_line, nose_mid_line)
    -    # print("angle: ", angle)
    -    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="nose_tip")
    -    points = get_points_on_chin(nose_mid_line, face_landmark)
    -    if len(points) < 2:
    -        face_landmark = get_face_ellipse(face_landmark)
    -        # print("extrapolating chin")
    -        points = get_points_on_chin(
    -            nose_mid_line, face_landmark, chin_type="chin_extrapolated"
    -        )
    -        if len(points) < 2:
    -            points = []
    -            points.append(face_landmark["chin"][0])
    -            points.append(face_landmark["chin"][-1])
    -    face_a = points[0]
    -    face_c = points[-1]
    -    # cv2.imshow('j', image)
    -    # cv2.waitKey(0)
    -    nose_mid_line, _, _, _, _ = get_line(face_landmark, image, type="bottom_lip")
    -    points = get_points_on_chin(nose_mid_line, face_landmark)
    -    face_d = points[0]
    -    face_f = points[-1]
    -
    -    six_points = np.float32([face_a, face_b, face_c, face_f, face_e, face_d])
    -
    -    return six_points, angle
    -
    -
    -def get_angle(line1, line2):
    -    delta_y = line1[-1][1] - line1[0][1]
    -    delta_x = line1[-1][0] - line1[0][0]
    -    perp_angle = math.degrees(math.atan2(delta_y, delta_x))
    -    if delta_x < 0:
    -        perp_angle = perp_angle + 180
    -    if perp_angle < 0:
    -        perp_angle += 360
    -    if perp_angle > 180:
    -        perp_angle -= 180
    -
    -    # print("perp", perp_angle)
    -    delta_y = line2[-1][1] - line2[0][1]
    -    delta_x = line2[-1][0] - line2[0][0]
    -    nose_angle = math.degrees(math.atan2(delta_y, delta_x))
    -
    -    if delta_x < 0:
    -        nose_angle = nose_angle + 180
    -    if nose_angle < 0:
    -        nose_angle += 360
    -    if nose_angle > 180:
    -        nose_angle -= 180
    -    # print("nose", nose_angle)
    -
    -    angle = nose_angle - perp_angle
    -    return angle
    -
    -
    -def mask_face(image, face_location, six_points, angle, args, type="surgical"):
    -    debug = False
    -
    -    # Find the face angle
    -    threshold = 13
    -    if angle < -threshold:
    -        type += "_right"
    -    elif angle > threshold:
    -        type += "_left"
    -
    -    face_height = face_location[2] - face_location[0]
    -    face_width = face_location[1] - face_location[3]
    -    # image = image_raw[
    -    #              face_location[0]-int(face_width/2): face_location[2]+int(face_width/2),
    -    #              face_location[3]-int(face_height/2): face_location[1]+int(face_height/2),
    -    #              :,
    -    #              ]
    -    # cv2.imshow('win', image)
    -    # cv2.waitKey(0)
    -    # Read appropriate mask image
    -    w = image.shape[0]
    -    h = image.shape[1]
    -    if not "empty" in type and not "inpaint" in type:
    -        cfg = read_cfg(config_filename="masks.cfg", mask_type=type, verbose=False)
    -    else:
    -        if "left" in type:
    -            str = "surgical_blue_left"
    -        elif "right" in type:
    -            str = "surgical_blue_right"
    -        else:
    -            str = "surgical_blue"
    -        cfg = read_cfg(config_filename="masks.cfg", mask_type=str, verbose=False)
    -    img = cv2.imread(cfg.template, cv2.IMREAD_UNCHANGED)
    -
    -    # Process the mask if necessary
    -    if args.pattern:
    -        # Apply pattern to mask
    -        img = texture_the_mask(img, args.pattern, args.pattern_weight)
    -
    -    if args.color:
    -        # Apply color to mask
    -        img = color_the_mask(img, args.color, args.color_weight)
    -
    -    mask_line = np.float32(
    -        [cfg.mask_a, cfg.mask_b, cfg.mask_c, cfg.mask_f, cfg.mask_e, cfg.mask_d]
    -    )
    -    # Warp the mask
    -    M, mask = cv2.findHomography(mask_line, six_points)
    -    dst_mask = cv2.warpPerspective(img, M, (h, w))
    -    dst_mask_points = cv2.perspectiveTransform(mask_line.reshape(-1, 1, 2), M)
    -    mask = dst_mask[:, :, 3]
    -    face_height = face_location[2] - face_location[0]
    -    face_width = face_location[1] - face_location[3]
    -    image_face = image[
    -        face_location[0] + int(face_height / 2) : face_location[2],
    -        face_location[3] : face_location[1],
    -        :,
    -    ]
    -
    -    image_face = image
    -
    -    # Adjust Brightness
    -    mask_brightness = get_avg_brightness(img)
    -    img_brightness = get_avg_brightness(image_face)
    -    delta_b = 1 + (img_brightness - mask_brightness) / 255
    -    dst_mask = change_brightness(dst_mask, delta_b)
    -
    -    # Adjust Saturation
    -    mask_saturation = get_avg_saturation(img)
    -    img_saturation = get_avg_saturation(image_face)
    -    delta_s = 1 - (img_saturation - mask_saturation) / 255
    -    dst_mask = change_saturation(dst_mask, delta_s)
    -
    -    # Apply mask
    -    mask_inv = cv2.bitwise_not(mask)
    -    img_bg = cv2.bitwise_and(image, image, mask=mask_inv)
    -    img_fg = cv2.bitwise_and(dst_mask, dst_mask, mask=mask)
    -    out_img = cv2.add(img_bg, img_fg[:, :, 0:3])
    -    if "empty" in type or "inpaint" in type:
    -        out_img = img_bg
    -    # Plot key points
    -
    -    if "inpaint" in type:
    -        out_img = cv2.inpaint(out_img, mask, 3, cv2.INPAINT_TELEA)
    -        # dst_NS = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
    -
    -    if debug:
    -        for i in six_points:
    -            cv2.circle(out_img, (i[0], i[1]), radius=4, color=(0, 0, 255), thickness=-1)
    -
    -        for i in dst_mask_points:
    -            cv2.circle(
    -                out_img, (i[0][0], i[0][1]), radius=4, color=(0, 255, 0), thickness=-1
    -            )
    -
    -    return out_img, mask
    -
    -
    -def draw_landmarks(face_landmarks, image):
    -    pil_image = Image.fromarray(image)
    -    d = ImageDraw.Draw(pil_image)
    -    for facial_feature in face_landmarks.keys():
    -        d.line(face_landmarks[facial_feature], width=5, fill="white")
    -    pil_image.show()
    -
    -
    -def get_face_ellipse(face_landmark):
    -    chin = face_landmark["chin"]
    -    x = []
    -    y = []
    -    for point in chin:
    -        x.append(point[0])
    -        y.append(point[1])
    -
    -    x = np.asarray(x)
    -    y = np.asarray(y)
    -
    -    a = fitEllipse(x, y)
    -    center = ellipse_center(a)
    -    phi = ellipse_angle_of_rotation(a)
    -    axes = ellipse_axis_length(a)
    -    a, b = axes
    -
    -    arc = 2.2
    -    R = np.arange(0, arc * np.pi, 0.2)
    -    xx = center[0] + a * np.cos(R) * np.cos(phi) - b * np.sin(R) * np.sin(phi)
    -    yy = center[1] + a * np.cos(R) * np.sin(phi) + b * np.sin(R) * np.cos(phi)
    -    chin_extrapolated = []
    -    for i in range(len(R)):
    -        chin_extrapolated.append((xx[i], yy[i]))
    -    face_landmark["chin_extrapolated"] = chin_extrapolated
    -    return face_landmark
    -
    -
    -def get_avg_brightness(img):
    -    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    -    h, s, v = cv2.split(img_hsv)
    -    return np.mean(v)
    -
    -
-def get_avg_saturation(img):
-    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
-    h, s, v = cv2.split(img_hsv)
-    return np.mean(s)
    -
    -
    -def change_brightness(img, value=1.0):
    -    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    -    h, s, v = cv2.split(img_hsv)
    -    v = value * v
    -    v[v > 255] = 255
    -    v = np.asarray(v, dtype=np.uint8)
    -    final_hsv = cv2.merge((h, s, v))
    -    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    -    return img
    -
    -
    -def change_saturation(img, value=1.0):
    -    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    -    h, s, v = cv2.split(img_hsv)
    -    s = value * s
    -    s[s > 255] = 255
    -    s = np.asarray(s, dtype=np.uint8)
    -    final_hsv = cv2.merge((h, s, v))
    -    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    -    return img
    -
    -
    -def check_path(path):
    -    is_directory = False
    -    is_file = False
    -    is_other = False
    -    if os.path.isdir(path):
    -        is_directory = True
    -    elif os.path.isfile(path):
    -        is_file = True
    -    else:
    -        is_other = True
    -
    -    return is_directory, is_file, is_other
    -
    -
    -def shape_to_landmarks(shape):
    -    face_landmarks = {}
    -    face_landmarks["left_eyebrow"] = [
    -        tuple(shape[17]),
    -        tuple(shape[18]),
    -        tuple(shape[19]),
    -        tuple(shape[20]),
    -        tuple(shape[21]),
    -    ]
    -    face_landmarks["right_eyebrow"] = [
    -        tuple(shape[22]),
    -        tuple(shape[23]),
    -        tuple(shape[24]),
    -        tuple(shape[25]),
    -        tuple(shape[26]),
    -    ]
    -    face_landmarks["nose_bridge"] = [
    -        tuple(shape[27]),
    -        tuple(shape[28]),
    -        tuple(shape[29]),
    -        tuple(shape[30]),
    -    ]
    -    face_landmarks["nose_tip"] = [
    -        tuple(shape[31]),
    -        tuple(shape[32]),
    -        tuple(shape[33]),
    -        tuple(shape[34]),
    -        tuple(shape[35]),
    -    ]
    -    face_landmarks["left_eye"] = [
    -        tuple(shape[36]),
    -        tuple(shape[37]),
    -        tuple(shape[38]),
    -        tuple(shape[39]),
    -        tuple(shape[40]),
    -        tuple(shape[41]),
    -    ]
    -    face_landmarks["right_eye"] = [
    -        tuple(shape[42]),
    -        tuple(shape[43]),
    -        tuple(shape[44]),
    -        tuple(shape[45]),
    -        tuple(shape[46]),
    -        tuple(shape[47]),
    -    ]
    -    face_landmarks["top_lip"] = [
    -        tuple(shape[48]),
    -        tuple(shape[49]),
    -        tuple(shape[50]),
    -        tuple(shape[51]),
    -        tuple(shape[52]),
    -        tuple(shape[53]),
    -        tuple(shape[54]),
    -        tuple(shape[60]),
    -        tuple(shape[61]),
    -        tuple(shape[62]),
    -        tuple(shape[63]),
    -        tuple(shape[64]),
    -    ]
    -
    -    face_landmarks["bottom_lip"] = [
    -        tuple(shape[54]),
    -        tuple(shape[55]),
    -        tuple(shape[56]),
    -        tuple(shape[57]),
    -        tuple(shape[58]),
    -        tuple(shape[59]),
    -        tuple(shape[48]),
    -        tuple(shape[64]),
    -        tuple(shape[65]),
    -        tuple(shape[66]),
    -        tuple(shape[67]),
    -        tuple(shape[60]),
    -    ]
    -
    -    face_landmarks["chin"] = [
    -        tuple(shape[0]),
    -        tuple(shape[1]),
    -        tuple(shape[2]),
    -        tuple(shape[3]),
    -        tuple(shape[4]),
    -        tuple(shape[5]),
    -        tuple(shape[6]),
    -        tuple(shape[7]),
    -        tuple(shape[8]),
    -        tuple(shape[9]),
    -        tuple(shape[10]),
    -        tuple(shape[11]),
    -        tuple(shape[12]),
    -        tuple(shape[13]),
    -        tuple(shape[14]),
    -        tuple(shape[15]),
    -        tuple(shape[16]),
    -    ]
    -    return face_landmarks
    -
    -
-def rect_to_bb(rect):
-    x1 = rect.left()
-    x2 = rect.right()
-    y1 = rect.top()
-    y2 = rect.bottom()
-    return (y1, x2, y2, x1)  # (top, right, bottom, left)
    -
    -
    -def mask_image(theImage, args):
    -    # Read the image
    -    image = theImage
    -    original_image = image.copy()
    -    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    -    gray = image
    -    face_locations = args.detector(gray, 1)
    -    mask_type = args.mask_type
    -    verbose = args.verbose
    -    if args.code:
    -        ind = random.randint(0, len(args.code_count) - 1)
    -        mask_dict = args.mask_dict_of_dict[ind]
    -        mask_type = mask_dict["type"]
    -        args.color = mask_dict["color"]
    -        args.pattern = mask_dict["texture"]
    -        args.code_count[ind] += 1
    -
    -    elif mask_type == "random":
    -        available_mask_types = get_available_mask_types()
    -        mask_type = random.choice(available_mask_types)
    -
    -    if verbose:
    -        tqdm.write("Faces found: {:2d}".format(len(face_locations)))
    -    # Process each face in the image
    -    masked_images = []
    -    mask_binary_array = []
    -    mask = []
    -    for (i, face_location) in enumerate(face_locations):
    -        shape = args.predictor(gray, face_location)
    -        shape = face_utils.shape_to_np(shape)
    -        face_landmarks = shape_to_landmarks(shape)
    -        face_location = rect_to_bb(face_location)
    -        # draw_landmarks(face_landmarks, image)
    -        six_points_on_face, angle = get_six_points(face_landmarks, image)
    -        mask = []
    -        if mask_type != "all":
    -            if len(masked_images) > 0:
    -                image = masked_images.pop(0)
    -            image, mask_binary = mask_face(
    -                image, face_location, six_points_on_face, angle, args, type=mask_type
    -            )
    -
    -            # compress to face tight
    -            face_height = face_location[2] - face_location[0]
    -            face_width = face_location[1] - face_location[3]
    -            masked_images.append(image)
    -            mask_binary_array.append(mask_binary)
    -            mask.append(mask_type)
    -        else:
    -            available_mask_types = get_available_mask_types()
    -            for m in range(len(available_mask_types)):
    -                if len(masked_images) == len(available_mask_types):
    -                    image = masked_images.pop(m)
    -                img, mask_binary = mask_face(
    -                    image,
    -                    face_location,
    -                    six_points_on_face,
    -                    angle,
    -                    args,
    -                    type=available_mask_types[m],
    -                )
    -                masked_images.insert(m, img)
    -                mask_binary_array.insert(m, mask_binary)
    -            mask = available_mask_types
    -            cc = 1
    -
    -    return masked_images, mask, mask_binary_array, original_image
    -
    -
    -def is_image(path):
    -    try:
    -        extensions = path[-4:]
    -        image_extensions = ["png", "PNG", "jpg", "JPG"]
    -
-        if extensions[1:] in image_extensions:
-            return True
-        else:
-            print("Please input an image file (png / jpg).")
-            return False
-    except Exception:
-        return False
    -
    -
    -def get_available_mask_types(config_filename="masks.cfg"):
    -    parser = ConfigParser()
    -    parser.optionxform = str
    -    parser.read(config_filename)
    -    available_mask_types = parser.sections()
    -    available_mask_types = [
    -        string for string in available_mask_types if "left" not in string
    -    ]
    -    available_mask_types = [
    -        string for string in available_mask_types if "right" not in string
    -    ]
    -
    -    return available_mask_types
    -
    -
    -def print_orderly(str, n):
    -    # print("")
    -    hyphens = "-" * int((n - len(str)) / 2)
    -    str_p = hyphens + " " + str + " " + hyphens
    -    hyphens_bar = "-" * len(str_p)
    -    print(hyphens_bar)
    -    print(str_p)
    -    print(hyphens_bar)
    -
    -
    -def display_MaskTheFace():
    -    with open("utils/display.txt", "r") as file:
    -        for line in file:
    -            cc = 1
    -            print(line, end="")
    diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/losses/README.md b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/losses/README.md
    deleted file mode 100644
    index 522150cfa1518797b488146fae506bfcaf063b8e..0000000000000000000000000000000000000000
    --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/losses/README.md
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -# Losses
    -
-Losses contains common loss computations used in NLP tasks.
    -
    -* `weighted_sparse_categorical_crossentropy_loss` computes per-batch sparse
    -categorical crossentropy loss.
    -
    -* `weighted_sparse_categorical_crossentropy_per_example_loss` computes
    -per-example sparse categorical crossentropy loss.
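For intuition, a sketch in plain TensorFlow of what the per-example and weighted per-batch variants compute; this deliberately bypasses the model-garden API, whose exact signatures are not shown here:

    import tensorflow as tf

    labels = tf.constant([1, 0])
    logits = tf.constant([[2.0, 0.5, 0.3], [0.1, 1.5, 0.2]])
    per_example = tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True)   # one scalar per example
    weights = tf.constant([1.0, 0.5])
    per_batch = tf.reduce_sum(per_example * weights) / tf.reduce_sum(weights)
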
    diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/classifier_utils.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/classifier_utils.py
    deleted file mode 100644
    index 64363e322633f7ae43d6ffc65c99ee1beff36827..0000000000000000000000000000000000000000
    --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/xlnet/classifier_utils.py
    +++ /dev/null
    @@ -1,162 +0,0 @@
    -# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License");
    -# you may not use this file except in compliance with the License.
    -# You may obtain a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -# ==============================================================================
    -"""Utilities for pre-processing classification data."""
    -from absl import logging
    -
    -from official.nlp.xlnet import data_utils
    -
    -SEG_ID_A = 0
    -SEG_ID_B = 1
    -
    -
    -class PaddingInputExample(object):
    -  """Fake example so the num input examples is a multiple of the batch size.
    -
    -  When running eval/predict on the TPU, we need to pad the number of examples
    -  to be a multiple of the batch size, because the TPU requires a fixed batch
    -  size. The alternative is to drop the last batch, which is bad because it means
    -  the entire output data won't be generated.
    -  We use this class instead of `None` because treating `None` as padding
-  batches could cause silent errors.
    -  """
    -
    -
    -class InputFeatures(object):
    -  """A single set of features of data."""
    -
    -  def __init__(self,
    -               input_ids,
    -               input_mask,
    -               segment_ids,
    -               label_id,
    -               is_real_example=True):
    -    self.input_ids = input_ids
    -    self.input_mask = input_mask
    -    self.segment_ids = segment_ids
    -    self.label_id = label_id
    -    self.is_real_example = is_real_example
    -
    -
    -def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    -  """Truncates a sequence pair in place to the maximum length."""
    -
    -  # This is a simple heuristic which will always truncate the longer sequence
    -  # one token at a time. This makes more sense than truncating an equal percent
    -  # of tokens from each, since if one sequence is very short then each token
    -  # that's truncated likely contains more information than a longer sequence.
    -  while True:
    -    total_length = len(tokens_a) + len(tokens_b)
    -    if total_length <= max_length:
    -      break
    -    if len(tokens_a) > len(tokens_b):
    -      tokens_a.pop()
    -    else:
    -      tokens_b.pop()
    -
    -
    -def convert_single_example(example_index, example, label_list, max_seq_length,
    -                           tokenize_fn, use_bert_format):
    -  """Converts a single `InputExample` into a single `InputFeatures`."""
    -
    -  if isinstance(example, PaddingInputExample):
    -    return InputFeatures(
    -        input_ids=[0] * max_seq_length,
    -        input_mask=[1] * max_seq_length,
    -        segment_ids=[0] * max_seq_length,
    -        label_id=0,
    -        is_real_example=False)
    -
    -  if label_list is not None:
    -    label_map = {}
    -    for (i, label) in enumerate(label_list):
    -      label_map[label] = i
    -
    -  tokens_a = tokenize_fn(example.text_a)
    -  tokens_b = None
    -  if example.text_b:
    -    tokens_b = tokenize_fn(example.text_b)
    -
    -  if tokens_b:
    -    # Modifies `tokens_a` and `tokens_b` in place so that the total
    -    # length is less than the specified length.
    -    # Account for two [SEP] & one [CLS] with "- 3"
    -    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    -  else:
    -    # Account for one [SEP] & one [CLS] with "- 2"
    -    if len(tokens_a) > max_seq_length - 2:
    -      tokens_a = tokens_a[:max_seq_length - 2]
    -
    -  tokens = []
    -  segment_ids = []
    -  for token in tokens_a:
    -    tokens.append(token)
    -    segment_ids.append(SEG_ID_A)
    -  tokens.append(data_utils.SEP_ID)
    -  segment_ids.append(SEG_ID_A)
    -
    -  if tokens_b:
    -    for token in tokens_b:
    -      tokens.append(token)
    -      segment_ids.append(SEG_ID_B)
    -    tokens.append(data_utils.SEP_ID)
    -    segment_ids.append(SEG_ID_B)
    -
    -  if use_bert_format:
    -    tokens.insert(0, data_utils.CLS_ID)
    -    segment_ids.insert(0, data_utils.SEG_ID_CLS)
    -  else:
    -    tokens.append(data_utils.CLS_ID)
    -    segment_ids.append(data_utils.SEG_ID_CLS)
    -
    -  input_ids = tokens
    -
    -  # The mask has 0 for real tokens and 1 for padding tokens. Only real
    -  # tokens are attended to.
    -  input_mask = [0] * len(input_ids)
    -
    -  # Zero-pad up to the sequence length.
    -  if len(input_ids) < max_seq_length:
    -    delta_len = max_seq_length - len(input_ids)
    -    if use_bert_format:
    -      input_ids = input_ids + [0] * delta_len
    -      input_mask = input_mask + [1] * delta_len
    -      segment_ids = segment_ids + [data_utils.SEG_ID_PAD] * delta_len
    -    else:
    -      input_ids = [0] * delta_len + input_ids
    -      input_mask = [1] * delta_len + input_mask
    -      segment_ids = [data_utils.SEG_ID_PAD] * delta_len + segment_ids
    -
    -  assert len(input_ids) == max_seq_length
    -  assert len(input_mask) == max_seq_length
    -  assert len(segment_ids) == max_seq_length
    -
    -  if label_list is not None:
    -    label_id = label_map[example.label]
    -  else:
    -    label_id = example.label
    -  if example_index < 5:
    -    logging.info("*** Example ***")
    -    logging.info("guid: %s", (example.guid))
    -    logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
    -    logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
    -    logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
    -    logging.info("label: %d (id = %d)", example.label, label_id)
    -
    -  feature = InputFeatures(
    -      input_ids=input_ids,
    -      input_mask=input_mask,
    -      segment_ids=segment_ids,
    -      label_id=label_id)
    -  return feature
    diff --git a/spaces/NeuML/wikisummary/README.md b/spaces/NeuML/wikisummary/README.md
    deleted file mode 100644
    index 97a3d84499ea2b2802e7f860ca1e7c4fbd77bf1f..0000000000000000000000000000000000000000
    --- a/spaces/NeuML/wikisummary/README.md
    +++ /dev/null
    @@ -1,38 +0,0 @@
    ----
    -title: Wikisummary
    -emoji: 📈
    -colorFrom: purple
    -colorTo: green
    -sdk: streamlit
    -sdk_version: 1.24.0
    -app_file: app.py
    -pinned: false
    ----
    -
    -# Configuration
    -
    -`title`: _string_  
    -Display title for the Space
    -
    -`emoji`: _string_  
    -Space emoji (emoji-only character allowed)
    -
    -`colorFrom`: _string_  
    -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
    -
    -`colorTo`: _string_  
    -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
    -
    -`sdk`: _string_  
    -Can be either `gradio` or `streamlit`
    -
    -`sdk_version` : _string_  
    -Only applicable for `streamlit` SDK.  
    -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
    -
    -`app_file`: _string_  
    -Path to your main application file (which contains either `gradio` or `streamlit` Python code).  
    -Path is relative to the root of the repository.
    -
    -`pinned`: _boolean_  
    -Whether the Space stays on top of your list.
    diff --git a/spaces/NimaBoscarino/climategan/climategan/strings.py b/spaces/NimaBoscarino/climategan/climategan/strings.py
    deleted file mode 100644
    index 37d1af144a7ace94bc07a1e005a7b7d4406f31b6..0000000000000000000000000000000000000000
    --- a/spaces/NimaBoscarino/climategan/climategan/strings.py
    +++ /dev/null
    @@ -1,99 +0,0 @@
    -"""custom __str__ methods for ClimateGAN's classes
    -"""
    -import torch
    -import torch.nn as nn
    -
    -
    -def title(name, color="\033[94m"):
    -    name = "====  " + name + "  ===="
    -    s = "=" * len(name)
    -    s = f"{s}\n{name}\n{s}"
    -    return f"\033[1m{color}{s}\033[0m"
    -
    -
    -def generator(G):
    -    s = title("OmniGenerator", "\033[95m") + "\n"
    -
    -    s += str(G.encoder) + "\n\n"
    -    for d in G.decoders:
    -        if d not in {"a", "t"}:
    -            s += str(G.decoders[d]) + "\n\n"
    -        elif d == "a":
    -            s += "[r & s]\n" + str(G.decoders["a"]["r"]) + "\n\n"
    -        else:
    -            if G.opts.gen.t.use_bit_conditioning:
    -                s += "[bit]\n" + str(G.decoders["t"]) + "\n\n"
    -            else:
    -                s += "[f & n]\n" + str(G.decoders["t"]["f"]) + "\n\n"
    -    return s.strip()
    -
    -
    -def encoder(E):
    -    s = title("Encoder") + "\n"
    -    for b in E.model:
    -        s += str(b) + "\n"
    -    return s.strip()
    -
    -
    -def get_conv_weight(conv):
    -    weight = torch.Tensor(
    -        conv.out_channels, conv.in_channels // conv.groups, *conv.kernel_size
    -    )
    -    return weight.shape
    -
    -
    -def conv2dblock(obj):
    -    name = "{:20}".format("Conv2dBlock")
    -    s = ""
    -    if "SpectralNorm" in obj.conv.__class__.__name__:
    -        s = "SpectralNorm => "
    -        w = str(tuple(get_conv_weight(obj.conv.module)))
    -    else:
    -        w = str(tuple(get_conv_weight(obj.conv)))
    -    return f"{name}{s}{w}".strip()
    -
    -
    -def resblocks(rb):
    -    s = "{}\n".format(f"ResBlocks({len(rb.model)})")
    -    for i, r in enumerate(rb.model):
    -        s += f"  - ({i}) {str(r)}\n"
    -    return s.strip()
    -
    -
    -def resblock(rb):
    -    s = "{:12}".format("Resblock")
    -    return f"{s}{rb.dim} channels, {rb.norm} norm + {rb.activation}"
    -
    -
    -def basedecoder(bd):
    -    s = title(bd.__class__.__name__) + "\n"
    -    for b in bd.model:
    -        if isinstance(b, nn.Upsample) or "InterpolateNearest2d" in b.__class__.__name__:
    -            s += "{:20}".format("Upsample") + "x2\n"
    -        else:
    -            s += str(b) + "\n"
    -    return s.strip()
    -
    -
    -def spaderesblock(srb):
    -    name = "{:20}".format("SPADEResnetBlock") + f"k {srb.kernel_size}, "
    -    s = f"{name}{srb.fin} > {srb.fout}, "
    -    s += f"param_free_norm: {srb.param_free_norm}, "
    -    s += f"spectral_norm: {srb.use_spectral_norm}"
    -    return s.strip()
    -
    -
    -def spadedecoder(sd):
    -    s = title(sd.__class__.__name__) + "\n"
    -    up = "{:20}x2\n".format("Upsample")
    -    s += up
    -    s += str(sd.head_0) + "\n"
    -    s += up
    -    s += str(sd.G_middle_0) + "\n"
    -    s += up
    -    s += str(sd.G_middle_1) + "\n"
-    for u in sd.up_spades:
    -        s += up
    -        s += str(u) + "\n"
    -    s += "{:20}".format("Conv2d") + str(tuple(get_conv_weight(sd.conv_img))) + " tanh"
    -    return s
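A tiny demo, assuming the title helper above is in scope; the escape codes render as bold colored text in an ANSI terminal:

    print(title("Encoder"))
    # prints three lines: a rule of '=', then '====  Encoder  ====',
    # then another rule of the same width
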
    diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/__init__.py
    deleted file mode 100644
    index 69f21684872f72ae8ee26d9ff7d2d2b6e6d526c3..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/__init__.py
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -from . import criterions, models, modules  # noqa
    diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/grad_multiply.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/grad_multiply.py
    deleted file mode 100644
    index 08d15f55dfda9c61a1cf8641ea31424fe1d97f57..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/grad_multiply.py
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -import torch
    -
    -
    -class GradMultiply(torch.autograd.Function):
    -    @staticmethod
    -    def forward(ctx, x, scale):
    -        ctx.scale = scale
    -        res = x.new(x)
    -        return res
    -
    -    @staticmethod
    -    def backward(ctx, grad):
    -        return grad * ctx.scale, None
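A usage sketch, assuming GradMultiply above is in scope: the forward pass is the identity, while gradients flowing back are scaled, e.g. to down-weight a shared encoder's gradient coming from one of several heads.

    import torch

    x = torch.ones(3, requires_grad=True)
    y = GradMultiply.apply(x, 0.1)  # values unchanged
    y.sum().backward()
    print(x.grad)                   # tensor([0.1000, 0.1000, 0.1000])
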
    diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder.py
    deleted file mode 100644
    index d0540d69229fb994b9e573a5016c9f239b7929e2..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder.py
    +++ /dev/null
    @@ -1,291 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -from typing import Optional, Tuple
    -
    -import torch
    -import torch.nn as nn
    -from fairseq.modules import (
    -    FairseqDropout,
    -    LayerDropModuleList,
    -    LayerNorm,
    -    MultiheadAttention,
    -    PositionalEmbedding,
    -    TransformerSentenceEncoderLayer,
    -)
    -from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
    -
    -
    -def init_bert_params(module):
    -    """
    -    Initialize the weights specific to the BERT Model.
    -    This overrides the default initializations depending on the specified arguments.
-        1. If normal_init_linear_weights is set then the weights of the
-           linear layer will be initialized using the normal distribution
-           and the bias will be set to the specified value.
-        2. If normal_init_embed_weights is set then the weights of the
-           embedding layer will be initialized using the normal distribution.
-        3. If normal_init_proj_weights is set then the weights of
-           in_project_weight for MultiheadAttention are initialized using
-           the normal distribution (to be validated).
    -    """
    -
    -    def normal_(data):
    -        # with FSDP, module params will be on CUDA, so we cast them back to CPU
    -        # so that the RNG is consistent with and without FSDP
    -        data.copy_(
    -            data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
    -        )
    -
    -    if isinstance(module, nn.Linear):
    -        normal_(module.weight.data)
    -        if module.bias is not None:
    -            module.bias.data.zero_()
    -    if isinstance(module, nn.Embedding):
    -        normal_(module.weight.data)
    -        if module.padding_idx is not None:
    -            module.weight.data[module.padding_idx].zero_()
    -    if isinstance(module, MultiheadAttention):
    -        normal_(module.q_proj.weight.data)
    -        normal_(module.k_proj.weight.data)
    -        normal_(module.v_proj.weight.data)
    -
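`init_bert_params` is meant to be handed to `nn.Module.apply`, which visits every sub-module recursively; that is what `self.apply(init_bert_params)` does further down when `apply_bert_init` is set. A small sketch (the toy model is illustrative):

```python
import torch.nn as nn

# apply() walks each sub-module, so the Linear and Embedding weights
# are re-drawn from N(0, 0.02) and biases are zeroed, as defined above.
toy = nn.Sequential(nn.Embedding(100, 16), nn.Linear(16, 16))
toy.apply(init_bert_params)
```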
    -
    -class TransformerSentenceEncoder(nn.Module):
    -    """
    -    Implementation for a Bi-directional Transformer based Sentence Encoder used
    -    in BERT/XLM style pre-trained models.
    -
    -    This first computes the token embedding using the token embedding matrix,
    -    position embeddings (if specified) and segment embeddings
    -    (if specified). After applying the specified number of
    -    TransformerEncoderLayers, it outputs all the internal states of the
    -    encoder as well as the final representation associated with the first
    -    token (usually CLS token).
    -
    -    Input:
    -        - tokens: B x T matrix representing sentences
    -        - segment_labels: B x T matrix representing segment label for tokens
    -
    -    Output:
    -        - a tuple of the following:
    -            - a list of internal model states used to compute the
    -              predictions where each tensor has shape T x B x C
    -            - sentence representation associated with first input token
    -              in format B x C.
    -    """
    -
    -    def __init__(
    -        self,
    -        padding_idx: int,
    -        vocab_size: int,
    -        num_encoder_layers: int = 6,
    -        embedding_dim: int = 768,
    -        ffn_embedding_dim: int = 3072,
    -        num_attention_heads: int = 8,
    -        dropout: float = 0.1,
    -        attention_dropout: float = 0.1,
    -        activation_dropout: float = 0.1,
    -        layerdrop: float = 0.0,
    -        max_seq_len: int = 256,
    -        num_segments: int = 2,
    -        use_position_embeddings: bool = True,
    -        offset_positions_by_padding: bool = True,
    -        encoder_normalize_before: bool = False,
    -        apply_bert_init: bool = False,
    -        activation_fn: str = "relu",
    -        learned_pos_embedding: bool = True,
    -        embed_scale: float = None,
    -        freeze_embeddings: bool = False,
    -        n_trans_layers_to_freeze: int = 0,
    -        export: bool = False,
    -        traceable: bool = False,
    -        q_noise: float = 0.0,
    -        qn_block_size: int = 8,
    -    ) -> None:
    -
    -        super().__init__()
    -        self.padding_idx = padding_idx
    -        self.vocab_size = vocab_size
    -        self.dropout_module = FairseqDropout(
    -            dropout, module_name=self.__class__.__name__
    -        )
    -        self.layerdrop = layerdrop
    -        self.max_seq_len = max_seq_len
    -        self.embedding_dim = embedding_dim
    -        self.num_segments = num_segments
    -        self.use_position_embeddings = use_position_embeddings
    -        self.apply_bert_init = apply_bert_init
    -        self.learned_pos_embedding = learned_pos_embedding
    -        self.traceable = traceable
    -
    -        self.embed_tokens = self.build_embedding(
    -            self.vocab_size, self.embedding_dim, self.padding_idx
    -        )
    -        self.embed_scale = embed_scale
    -
    -        if q_noise > 0:
    -            self.quant_noise = apply_quant_noise_(
    -                nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
    -                q_noise,
    -                qn_block_size,
    -            )
    -        else:
    -            self.quant_noise = None
    -
    -        self.segment_embeddings = (
    -            nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
    -            if self.num_segments > 0
    -            else None
    -        )
    -
    -        self.embed_positions = (
    -            PositionalEmbedding(
    -                self.max_seq_len,
    -                self.embedding_dim,
    -                padding_idx=(self.padding_idx if offset_positions_by_padding else None),
    -                learned=self.learned_pos_embedding,
    -            )
    -            if self.use_position_embeddings
    -            else None
    -        )
    -
    -        if encoder_normalize_before:
    -            self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
    -        else:
    -            self.emb_layer_norm = None
    -
    -        if self.layerdrop > 0.0:
    -            self.layers = LayerDropModuleList(p=self.layerdrop)
    -        else:
    -            self.layers = nn.ModuleList([])
    -        self.layers.extend(
    -            [
    -                self.build_transformer_sentence_encoder_layer(
    -                    embedding_dim=self.embedding_dim,
    -                    ffn_embedding_dim=ffn_embedding_dim,
    -                    num_attention_heads=num_attention_heads,
    -                    dropout=self.dropout_module.p,
    -                    attention_dropout=attention_dropout,
    -                    activation_dropout=activation_dropout,
    -                    activation_fn=activation_fn,
    -                    export=export,
    -                    q_noise=q_noise,
    -                    qn_block_size=qn_block_size,
    -                )
    -                for _ in range(num_encoder_layers)
    -            ]
    -        )
    -
    -        # Apply initialization of model params after building the model
    -        if self.apply_bert_init:
    -            self.apply(init_bert_params)
    -
    -        def freeze_module_params(m):
    -            if m is not None:
    -                for p in m.parameters():
    -                    p.requires_grad = False
    -
    -        if freeze_embeddings:
    -            freeze_module_params(self.embed_tokens)
    -            freeze_module_params(self.segment_embeddings)
    -            freeze_module_params(self.embed_positions)
    -            freeze_module_params(self.emb_layer_norm)
    -
    -        for layer in range(n_trans_layers_to_freeze):
    -            freeze_module_params(self.layers[layer])
    -
    -    def build_embedding(self, vocab_size, embedding_dim, padding_idx):
    -        return nn.Embedding(vocab_size, embedding_dim, padding_idx)
    -
    -    def build_transformer_sentence_encoder_layer(
    -        self,
    -        embedding_dim,
    -        ffn_embedding_dim,
    -        num_attention_heads,
    -        dropout,
    -        attention_dropout,
    -        activation_dropout,
    -        activation_fn,
    -        export,
    -        q_noise,
    -        qn_block_size,
    -    ):
    -        return TransformerSentenceEncoderLayer(
    -            embedding_dim=embedding_dim,
    -            ffn_embedding_dim=ffn_embedding_dim,
    -            num_attention_heads=num_attention_heads,
    -            dropout=dropout,
    -            attention_dropout=attention_dropout,
    -            activation_dropout=activation_dropout,
    -            activation_fn=activation_fn,
    -            export=export,
    -            q_noise=q_noise,
    -            qn_block_size=qn_block_size,
    -        )
    -
    -    def forward(
    -        self,
    -        tokens: torch.Tensor,
    -        segment_labels: torch.Tensor = None,
    -        last_state_only: bool = False,
    -        positions: Optional[torch.Tensor] = None,
    -        token_embeddings: Optional[torch.Tensor] = None,
    -        attn_mask: Optional[torch.Tensor] = None,
    -    ) -> Tuple[torch.Tensor, torch.Tensor]:
    -        is_tpu = tokens.device.type == "xla"
    -
    -        # compute padding mask. This is needed for multi-head attention
    -        padding_mask = tokens.eq(self.padding_idx)
    -        if not self.traceable and not is_tpu and not padding_mask.any():
    -            padding_mask = None
    -
    -        if token_embeddings is not None:
    -            x = token_embeddings
    -        else:
    -            x = self.embed_tokens(tokens)
    -
    -        if self.embed_scale is not None:
    -            x = x * self.embed_scale
    -
    -        if self.embed_positions is not None:
    -            x = x + self.embed_positions(tokens, positions=positions)
    -
    -        if self.segment_embeddings is not None and segment_labels is not None:
    -            x = x + self.segment_embeddings(segment_labels)
    -
    -        if self.quant_noise is not None:
    -            x = self.quant_noise(x)
    -
    -        if self.emb_layer_norm is not None:
    -            x = self.emb_layer_norm(x)
    -
    -        x = self.dropout_module(x)
    -
    -        # account for padding while computing the representation
    -        if padding_mask is not None:
    -            x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
    -
    -        # B x T x C -> T x B x C
    -        x = x.transpose(0, 1)
    -
    -        inner_states = []
    -        if not last_state_only:
    -            inner_states.append(x)
    -
    -        for layer in self.layers:
    -            x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask)
    -            if not last_state_only:
    -                inner_states.append(x)
    -
    -        sentence_rep = x[0, :, :]
    -
    -        if last_state_only:
    -            inner_states = [x]
    -
    -        if self.traceable:
    -            return torch.stack(inner_states), sentence_rep
    -        else:
    -            return inner_states, sentence_rep
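A hedged sketch of driving this encoder, assuming a fairseq installation that still ships the module (vocabulary size and shapes are illustrative):

```python
import torch
from fairseq.modules.transformer_sentence_encoder import TransformerSentenceEncoder

encoder = TransformerSentenceEncoder(
    padding_idx=0,
    vocab_size=1000,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
)
tokens = torch.randint(1, 1000, (2, 16))      # B x T batch of token ids
inner_states, sentence_rep = encoder(tokens)
print(len(inner_states))   # one T x B x C tensor per layer, plus the embedding
print(sentence_rep.shape)  # B x C, the first-token (CLS) representation
```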
    diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_iopath.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_iopath.py
    deleted file mode 100644
    index 908261a6619806f7ef9b5dd1beb5d6817b249a6e..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_iopath.py
    +++ /dev/null
    @@ -1,29 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -import unittest
    -from unittest import mock
    -
    -
    -class TestIOPath(unittest.TestCase):
    -
    -    def test_no_iopath(self):
    -        from .test_reproducibility import TestReproducibility
    -
    -        with mock.patch.dict("sys.modules", {"iopath": None}):
    -            # reuse reproducibility tests, which are e2e tests that should cover
    -            # most checkpoint related functionality
    -            TestReproducibility._test_reproducibility(self, "test_reproducibility")
    -
    -    def test_no_supports_rename(self):
    -        from .test_reproducibility import TestReproducibility
    -
    -        with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn:
    -            mock_fn.return_value = False
    -            TestReproducibility._test_reproducibility(self, "test_reproducibility")
    -
    -
    -if __name__ == "__main__":
    -    unittest.main()
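The key trick here is `mock.patch.dict("sys.modules", {"iopath": None})`: setting a `sys.modules` entry to `None` makes any `import iopath` inside the block raise `ImportError`, so the no-iopath code path is exercised without uninstalling anything. A standalone illustration:

```python
import importlib
from unittest import mock

with mock.patch.dict("sys.modules", {"iopath": None}):
    try:
        importlib.import_module("iopath")
    except ImportError:
        print("iopath import blocked inside the patch, as the test expects")
```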
    diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/num_samples_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/num_samples_dataset.py
    deleted file mode 100644
    index 99a17495c701d8a05e0268f98bf453905e11d078..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/num_samples_dataset.py
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -# Copyright (c) Facebook, Inc. and its affiliates.
    -#
    -# This source code is licensed under the MIT license found in the
    -# LICENSE file in the root directory of this source tree.
    -
    -from . import FairseqDataset
    -
    -
    -class NumSamplesDataset(FairseqDataset):
    -    def __getitem__(self, index):
    -        return 1
    -
    -    def __len__(self):
    -        return 0
    -
    -    def collater(self, samples):
    -        return sum(samples)
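The dataset is a counting helper: every item is the constant `1` and `collater` sums the batch, so collating N samples yields N. The same idea without the fairseq base class, as a sketch:

```python
class CountingDataset:
    def __getitem__(self, index):
        return 1  # every "sample" is just a count of one

    def collater(self, samples):
        return sum(samples)  # a batch collapses to its sample count

ds = CountingDataset()
print(ds.collater([ds[i] for i in range(8)]))  # -> 8
```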
    diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_finetuning.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_finetuning.py
    deleted file mode 100644
    index 4ef87c604f00581f03075e9ebe10a43dd51d6e45..0000000000000000000000000000000000000000
    --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_finetuning.py
    +++ /dev/null
    @@ -1,346 +0,0 @@
    -# Copyright (c) 2017-present, Facebook, Inc.
    -# All rights reserved.
    -#
    -# This source code is licensed under the license found in the LICENSE file in
    -# the root directory of this source tree. An additional grant of patent rights
    -# can be found in the PATENTS file in the same directory.
    -
    -import logging
    -import os
    -import torch
    -import json
    -
    -from argparse import Namespace
    -from dataclasses import dataclass, field
    -from typing import Optional, Any
    -
    -from fairseq.data import AddTargetDataset, Dictionary, encoders
    -from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig
    -from fairseq.dataclass import FairseqDataclass
    -from fairseq.dataclass.configs import GenerationConfig
    -from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
    -
    -from . import register_task
    -from .. import utils
    -from ..logging import metrics
    -
    -
    -logger = logging.getLogger(__name__)
    -
    -
    -class LabelEncoder(object):
    -    def __init__(self, dictionary):
    -        self.dictionary = dictionary
    -
    -    def __call__(self, label):
    -        return self.dictionary.encode_line(
    -            label, append_eos=False, add_if_not_exist=False
    -        )
    -
    -
    -def label_len_fn(label):
    -    return len(label.split(" "))
    -
    -
    -@dataclass
    -class AudioFinetuningConfig(AudioPretrainingConfig):
    -    # Options for reporting WER metrics during validation. Only applicable to
    -    # Seq2Seq models during fine-tuning
    -    eval_wer: bool = field(
    -        default=False, metadata={"help": "compute WER for Seq2Seq models"}
    -    )
    -    eval_wer_config: GenerationConfig = field(
    -        default_factory=lambda: GenerationConfig(),
    -        metadata={"help": "beam search config for evaluating wer during training"},
    -    )
    -    eval_wer_tokenizer: Any = field(
    -        default=None,
    -        metadata={"help": "tokenizer config for evaluating wer during training"},
    -    )
    -    eval_wer_post_process: str = field(
    -        default="letter",
    -        metadata={
    -            "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)"
    -        },
    -    )
    -    eval_bleu: bool = field(
    -        default=False, metadata={"help": "evaluation with BLEU scores"}
    -    )
    -    eval_bleu_detok: Optional[str] = field(
    -        default=None, metadata={
    -            "help": "detokenize before computing BLEU (e.g., 'moses'); "
    -                    "required if using --eval-bleu; use 'space' to disable "
    -                    "detokenization; see fairseq.data.encoders for other options"
    -        }
    -    )
    -    eval_bleu_detok_args: str = field(
    -        default="{}",
    -        metadata={"help": "args for building the tokenizer, if needed"}
    -    )
    -    eval_tokenized_bleu: bool = field(
    -        default=False,
    -        metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
    -    )
    -    eval_bleu_remove_bpe: Optional[str] = field(
    -        default=None, metadata={"help": "remove BPE before computing BLEU"}
    -    )
    -    eval_bleu_args: str = field(
    -        default="{}",
    -        metadata={"help": "generation args for BLUE scoring, e.g., "
    -                          "'{\"beam\": 4, \"lenpen\": 0.6}'"}
    -    )
    -    eval_bleu_print_samples: bool = field(
    -        default=False,
    -        metadata={"help": "print sample generations during validation"}
    -    )
    -    autoregressive: bool = field(
    -        default=False,
    -        metadata={
    -            "help": "required for autoregressive decoders (like seq2seq models); "
    -            "adds 'prev_output_tokens' to input and appends eos to target"
    -        },
    -    )
    -
    -
    -@register_task("audio_finetuning", dataclass=AudioFinetuningConfig)
    -class AudioFinetuningTask(AudioPretrainingTask):
    -    """ """
    -
    -    cfg: AudioFinetuningConfig
    -
    -    def __init__(
    -        self,
    -        cfg: AudioFinetuningConfig,
    -    ):
    -        super().__init__(cfg)
-        self.blank_symbol = "<s>"
    -
    -        self.state.add_factory("target_dictionary", self.load_target_dictionary)
    -
    -    def load_target_dictionary(self):
    -        if self.cfg.labels:
    -            dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt")
    -            return Dictionary.load(dict_path)
    -        return None
    -
    -    def load_dataset(self, split: str, task_cfg: AudioFinetuningConfig = None, **kwargs):
    -        super().load_dataset(split, task_cfg, **kwargs)
    -
    -        task_cfg = task_cfg or self.cfg
    -        assert task_cfg.labels is not None
    -        text_compression_level = getattr(
    -            TextCompressionLevel, str(self.cfg.text_compression_level)
    -        )
    -        data_path = self.cfg.data
    -        label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}")
    -        skipped_indices = getattr(self.datasets[split], "skipped_indices", set())
    -        text_compressor = TextCompressor(level=text_compression_level)
    -        with open(label_path, "r") as f:
    -            labels = [
    -                text_compressor.compress(l)
    -                for i, l in enumerate(f) if i not in skipped_indices
    -            ]
    -
    -        assert len(labels) == len(self.datasets[split]), (
    -            f"labels length ({len(labels)}) and dataset length "
    -            f"({len(self.datasets[split])}) do not match"
    -        )
    -
    -        process_label = LabelEncoder(self.target_dictionary)
    -
    -        self.datasets[split] = AddTargetDataset(
    -            self.datasets[split],
    -            labels,
    -            pad=self.target_dictionary.pad(),
    -            eos=self.target_dictionary.eos(),
    -            batch_targets=True,
    -            process_label=process_label,
    -            label_len_fn=label_len_fn,
    -            add_to_input=task_cfg.get("autoregressive", False),
    -            text_compression_level=text_compression_level
    -        )
    -
    -    @property
    -    def target_dictionary(self):
    -        """Return the :class:`~fairseq.data.Dictionary` for the language
    -        model."""
    -        return self.state.target_dictionary
    -
    -    def valid_step(self, sample, model, criterion):
    -        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
    -        if self.cfg.eval_wer and self.cfg.autoregressive:
    -            metrics = self._inference_with_wer(self.sequence_generator, sample, model)
    -            logging_output["_num_char_errors"] = metrics["num_char_errors"]
    -            logging_output["_num_chars"] = metrics["num_chars"]
    -            logging_output["_num_word_errors"] = metrics["num_word_errors"]
    -            logging_output["_num_words"] = metrics["num_words"]
    -        if self.cfg.eval_bleu and self.cfg.autoregressive:
    -            metrics = self._inference_with_bleu(self.sequence_generator, sample, model)
    -            logging_output['_bleu_sys_len'] = metrics.sys_len
    -            logging_output['_bleu_ref_len'] = metrics.ref_len
    -            # we split counts into separate entries so that they can be
    -            # summed efficiently across workers using fast-stat-sync
    -            assert len(metrics.counts) == 4
    -            for i in range(4):
    -                logging_output[f"_bleu_counts_{i}"] = metrics.counts[i]
    -                logging_output[f"_bleu_totals_{i}"] = metrics.totals[i]
    -        return loss, sample_size, logging_output
    -
    -    def build_model(self, model_cfg: FairseqDataclass):
    -        model = super().build_model(model_cfg)
    -
    -        if self.cfg.eval_wer and self.cfg.autoregressive:
    -            self.sequence_generator = self.build_generator(
    -                [model],
    -                self.cfg.eval_wer_config,
    -            )
    -            if self.cfg.eval_wer_tokenizer:
    -                self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer)
    -            else:
    -                self.tokenizer = None
    -        if self.cfg.eval_bleu and self.cfg.autoregressive:
    -            assert self.cfg.eval_bleu_detok is not None, (
    -                '--eval-bleu-detok is required if using --eval-bleu; '
    -                'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
    -                'to disable detokenization, e.g., when using sentencepiece)'
    -            )
    -            detok_args = json.loads(self.cfg.eval_bleu_detok_args)
    -            self.tokenizer = encoders.build_tokenizer(
    -                Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
    -            )
    -            gen_args = json.loads(self.cfg.eval_bleu_args)
    -            gen_args = Namespace(**gen_args)
    -            self.sequence_generator = self.build_generator([model], gen_args)
    -
    -        return model
    -
    -    def _inference_with_wer(self, generator, sample, model):
    -        import editdistance
    -
    -        def decode(toks):
    -            s = self.target_dictionary.string(
    -                toks.int().cpu(),
    -                self.cfg.eval_wer_post_process,
    -                escape_unk=True,
    -            )
    -            if self.tokenizer:
    -                s = self.tokenizer.decode(s)
    -            return s
    -
    -        num_word_errors, num_char_errors = 0, 0
    -        num_chars, num_words = 0, 0
    -        gen_out = self.inference_step(generator, [model], sample, None)
    -        for i in range(len(gen_out)):
    -            hyp = decode(gen_out[i][0]["tokens"])
    -            ref = decode(
    -                utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
    -            )
    -            num_char_errors += editdistance.eval(hyp, ref)
    -            num_chars += len(ref)
    -            hyp_words = hyp.split()
    -            ref_words = ref.split()
    -            num_word_errors += editdistance.eval(hyp_words, ref_words)
    -            num_words += len(ref_words)
    -
    -        return {
    -            "num_char_errors": num_char_errors,
    -            "num_chars": num_chars,
    -            "num_word_errors": num_word_errors,
    -            "num_words": num_words,
    -        }
    -
    -    def _inference_with_bleu(self, generator, sample, model):
    -        import sacrebleu
    -
    -        def decode(toks, is_ref):
    -            s = self.target_dictionary.string(
    -                toks.int().cpu(),
    -                self.cfg.eval_bleu_remove_bpe,
-                # The default unknown string in fairseq is `<unk>`, but
-                # this is tokenized by sacrebleu as `< unk >`, inflating
    -                # BLEU scores. Instead, we use a somewhat more verbose
    -                # alternative that is unlikely to appear in the real
    -                # reference, but doesn't get split into multiple tokens.
    -                unk_string=(
    -                    "UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP"
    -                ),
    -            )
    -            if self.tokenizer:
    -                s = self.tokenizer.decode(s)
    -            return s
    -
    -        gen_out = self.inference_step(generator, [model], sample)
    -        hyps, refs = [], []
    -        for i in range(len(gen_out)):
    -            hyps.append(decode(gen_out[i][0]['tokens'], is_ref=False))
    -            refs.append(
    -                decode(
    -                    utils.strip_pad(
    -                        sample['target'][i],
    -                        self.target_dictionary.pad()
    -                    ),
-                    is_ref=True,  # don't count <unk> as matches to the hypo
    -                )
    -            )
    -        if self.cfg.eval_bleu_print_samples:
    -            logger.info('H-{} {}'.format(sample["id"][0], hyps[0]))
    -            logger.info('T-{} {}'.format(sample["id"][0], refs[0]))
    -
    -        eval_tokenization = 'none' if self.cfg.eval_tokenized_bleu else '13a'
    -        return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization)
    -
    -    def reduce_metrics(self, logging_outputs, criterion):
    -        super().reduce_metrics(logging_outputs, criterion)
    -
    -        if self.cfg.eval_wer:
    -            zero = torch.scalar_tensor(0.0)
    -            num_char_errors = sum(
    -                log.get("_num_char_errors", zero) for log in logging_outputs
    -            )
    -            num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
    -            num_word_errors = sum(
    -                log.get("_num_word_errors", zero) for log in logging_outputs
    -            )
    -            num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
    -            metrics.log_scalar("_num_char_errors", num_char_errors)
    -            metrics.log_scalar("_num_chars", num_chars)
    -            metrics.log_scalar("_num_word_errors", num_word_errors)
    -            metrics.log_scalar("_num_words", num_words)
    -            if num_chars > 0:
    -                metrics.log_derived(
    -                    "uer",
    -                    lambda meters: meters["_num_char_errors"].sum
    -                    * 100.0
    -                    / meters["_num_chars"].sum
    -                    if meters["_num_chars"].sum > 0
    -                    else float("nan"),
    -                )
    -            if num_words > 0:
    -                metrics.log_derived(
    -                    "wer",
    -                    lambda meters: meters["_num_word_errors"].sum
    -                    * 100.0
    -                    / meters["_num_words"].sum
    -                    if meters["_num_words"].sum > 0
    -                    else float("nan"),
    -                )
    -        if self.cfg.eval_bleu:
    -            len_keys = ["_bleu_sys_len", "_bleu_ref_len"]
    -            count_keys = [f"_bleu_counts_{i}" for i in range(4)]
    -            total_keys = [f"_bleu_totals_{i}" for i in range(4)]
    -            for k in len_keys + count_keys + total_keys:
    -                metrics.log_scalar(
    -                    k, sum(log.get(k, 0) for log in logging_outputs)
    -                )
    -
    -            import sacrebleu
    -            metrics.log_derived(
    -                'bleu',
    -                lambda meters: sacrebleu.compute_bleu(
    -                    correct=[meters[k].sum for k in count_keys],
    -                    total=[meters[k].sum for k in total_keys],
    -                    sys_len=meters['_bleu_sys_len'].sum,
    -                    ref_len=meters['_bleu_ref_len'].sum,
    -                    smooth_method="exp"
    -                ).score
    -            )
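Note that both the WER and BLEU paths above log raw counts (errors, reference lengths, n-gram statistics) rather than final scores, so the counts can be summed across workers before the division happens in `reduce_metrics`. A standalone sketch of the word-error counting, assuming the `editdistance` package used by `_inference_with_wer`:

```python
import editdistance

hyp = "the cat sat on mat".split()
ref = "the cat sat on the mat".split()

num_word_errors = editdistance.eval(hyp, ref)  # minimum edit distance over words
num_words = len(ref)
print(f"WER = {100.0 * num_word_errors / num_words:.1f}%")  # 1/6 -> 16.7%
```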
    diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/__init__.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/__init__.py
    deleted file mode 100644
    index 6904d13a3d820f39b4b54d09671673e49a06aa4c..0000000000000000000000000000000000000000
    --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/__init__.py
    +++ /dev/null
    @@ -1,13 +0,0 @@
    -from .executor import QuestionExecutor
    -from .level1 import __file__ as _level1_file_
    -from .level2 import __file__ as _level2_file_
    -from .level3 import __file__ as _level3_file_
    -from .level4 import __file__ as _level4_file_
    -from .level5 import __file__ as _level5_file_
    -from .question import Question, register_question, list_ordered_questions
    -
    -_ = _level1_file_
    -_ = _level2_file_
    -_ = _level3_file_
    -_ = _level4_file_
    -_ = _level5_file_
    \ No newline at end of file
    diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/m2m.py b/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/m2m.py
    deleted file mode 100644
    index 780d34dadf7d9b01369884b1ae3e5a6e0c6bceb3..0000000000000000000000000000000000000000
    --- a/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/m2m.py
    +++ /dev/null
    @@ -1,95 +0,0 @@
    -from typing import List
    -
    -import torch
    -from torch import Tensor
    -from torchmetrics import Metric
    -
    -from .utils import *
    -
    -
    -# motion reconstruction metric
    -class PredMetrics(Metric):
    -
    -    def __init__(self,
    -                 cfg,
    -                 njoints: int = 22,
    -                 jointstype: str = "mmm",
    -                 force_in_meter: bool = True,
    -                 align_root: bool = True,
    -                 dist_sync_on_step=True,
    -                 task: str = "pred",
    -                 **kwargs):
    -        super().__init__(dist_sync_on_step=dist_sync_on_step)
    -
-        self.name = 'Motion Prediction'
    -        self.cfg = cfg
    -        self.jointstype = jointstype
    -        self.align_root = align_root
    -        self.task = task
    -        self.force_in_meter = force_in_meter
    -
    -        self.add_state("count", default=torch.tensor(0), dist_reduce_fx="sum")
    -        self.add_state("count_seq",
    -                       default=torch.tensor(0),
    -                       dist_reduce_fx="sum")
    -
    -        self.add_state("APD",
    -                       default=torch.tensor([0.0]),
    -                       dist_reduce_fx="sum")
    -        self.add_state("ADE",
    -                       default=torch.tensor([0.0]),
    -                       dist_reduce_fx="sum")
    -        self.add_state("FDE",
    -                       default=torch.tensor([0.0]),
    -                       dist_reduce_fx="sum")
    -
    -        self.MR_metrics = ["APD", "ADE", "FDE"]
    -
    -        # All metric
    -        self.metrics = self.MR_metrics
    -
    -    def compute(self, sanity_flag):
    -
    -        count = self.count
    -        count_seq = self.count_seq
    -        mr_metrics = {}
    -        mr_metrics["APD"] = self.APD / count_seq 
    -        mr_metrics["ADE"] = self.ADE / count_seq
    -        mr_metrics["FDE"] = self.FDE / count_seq
    -        
    -        # Reset
    -        self.reset()
    -        
    -        return mr_metrics
    -
    -    def update(self, joints_rst: Tensor, joints_ref: Tensor,
    -               lengths: List[int]):
    -        
    -        assert joints_rst.shape == joints_ref.shape
    -        assert joints_rst.dim() == 4
    -        # (bs, seq, njoint=22, 3)
    -
    -        self.count += sum(lengths)
    -        self.count_seq += len(lengths)
    -
    -        rst = torch.flatten(joints_rst, start_dim=2)
    -        ref = torch.flatten(joints_ref, start_dim=2)
    -        
    -        for i, l in enumerate(lengths):
    -            if self.task == "pred":
    -                pred_start = int(l*self.cfg.ABLATION.predict_ratio)
    -                diff = rst[i,pred_start:] - ref[i,pred_start:]
    -            elif self.task == "inbetween":
    -                inbetween_start = int(l*self.cfg.ABLATION.inbetween_ratio)
    -                inbetween_end = l - int(l*self.cfg.ABLATION.inbetween_ratio)
    -                diff = rst[i,inbetween_start:inbetween_end] - ref[i,inbetween_start:inbetween_end]
    -            else:
    -                print(f"Task {self.task} not implemented.")
    -                diff = rst - ref
    -            
    -            dist = torch.linalg.norm(diff, dim=-1)[None]
    -
    -            ade = dist.mean(dim=1)
    -            fde = dist[:,-1]
    -            self.ADE = self.ADE + ade
    -            self.FDE = self.FDE + fde
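ADE (average displacement error) averages the per-frame distance between predicted and reference joints over the evaluated span, while FDE (final displacement error) keeps only the last frame. A compact sketch on dummy motion with the `(bs, seq, njoints, 3)` layout used above:

```python
import torch

joints_rst = torch.randn(2, 30, 22, 3)  # predicted motion
joints_ref = torch.randn(2, 30, 22, 3)  # reference motion

rst = torch.flatten(joints_rst, start_dim=2)
ref = torch.flatten(joints_ref, start_dim=2)
dist = torch.linalg.norm(rst - ref, dim=-1)  # (bs, seq) per-frame distances

ade = dist.mean(dim=1)  # average over frames
fde = dist[:, -1]       # final frame only
print(ade.shape, fde.shape)  # torch.Size([2]) torch.Size([2])
```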
    diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/__init__.py b/spaces/OpenMotionLab/MotionGPT/pyrender/tests/unit/__init__.py
    deleted file mode 100644
    index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
    diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/psa_mask.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/psa_mask.py
    deleted file mode 100644
    index cdf14e62b50e8d4dd6856c94333c703bcc4c9ab6..0000000000000000000000000000000000000000
    --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/psa_mask.py
    +++ /dev/null
    @@ -1,92 +0,0 @@
    -# Modified from https://github.com/hszhao/semseg/blob/master/lib/psa
    -from torch import nn
    -from torch.autograd import Function
    -from torch.nn.modules.utils import _pair
    -
    -from ..utils import ext_loader
    -
    -ext_module = ext_loader.load_ext('_ext',
    -                                 ['psamask_forward', 'psamask_backward'])
    -
    -
    -class PSAMaskFunction(Function):
    -
    -    @staticmethod
    -    def symbolic(g, input, psa_type, mask_size):
    -        return g.op(
    -            'mmcv::MMCVPSAMask',
    -            input,
    -            psa_type_i=psa_type,
    -            mask_size_i=mask_size)
    -
    -    @staticmethod
    -    def forward(ctx, input, psa_type, mask_size):
    -        ctx.psa_type = psa_type
    -        ctx.mask_size = _pair(mask_size)
    -        ctx.save_for_backward(input)
    -
    -        h_mask, w_mask = ctx.mask_size
    -        batch_size, channels, h_feature, w_feature = input.size()
    -        assert channels == h_mask * w_mask
    -        output = input.new_zeros(
    -            (batch_size, h_feature * w_feature, h_feature, w_feature))
    -
    -        ext_module.psamask_forward(
    -            input,
    -            output,
    -            psa_type=psa_type,
    -            num_=batch_size,
    -            h_feature=h_feature,
    -            w_feature=w_feature,
    -            h_mask=h_mask,
    -            w_mask=w_mask,
    -            half_h_mask=(h_mask - 1) // 2,
    -            half_w_mask=(w_mask - 1) // 2)
    -        return output
    -
    -    @staticmethod
    -    def backward(ctx, grad_output):
    -        input = ctx.saved_tensors[0]
    -        psa_type = ctx.psa_type
    -        h_mask, w_mask = ctx.mask_size
    -        batch_size, channels, h_feature, w_feature = input.size()
    -        grad_input = grad_output.new_zeros(
    -            (batch_size, channels, h_feature, w_feature))
    -        ext_module.psamask_backward(
    -            grad_output,
    -            grad_input,
    -            psa_type=psa_type,
    -            num_=batch_size,
    -            h_feature=h_feature,
    -            w_feature=w_feature,
    -            h_mask=h_mask,
    -            w_mask=w_mask,
    -            half_h_mask=(h_mask - 1) // 2,
    -            half_w_mask=(w_mask - 1) // 2)
    -        return grad_input, None, None, None
    -
    -
    -psa_mask = PSAMaskFunction.apply
    -
    -
    -class PSAMask(nn.Module):
    -
    -    def __init__(self, psa_type, mask_size=None):
    -        super(PSAMask, self).__init__()
    -        assert psa_type in ['collect', 'distribute']
    -        if psa_type == 'collect':
    -            psa_type_enum = 0
    -        else:
    -            psa_type_enum = 1
    -        self.psa_type_enum = psa_type_enum
    -        self.mask_size = mask_size
    -        self.psa_type = psa_type
    -
    -    def forward(self, input):
    -        return psa_mask(input, self.psa_type_enum, self.mask_size)
    -
    -    def __repr__(self):
    -        s = self.__class__.__name__
    -        s += f'(psa_type={self.psa_type}, '
    -        s += f'mask_size={self.mask_size})'
    -        return s
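A usage sketch of the shape contract: in 'collect' mode the input must carry `h_mask * w_mask` channels and the output expands to `h_feature * w_feature` channels. This assumes an mmcv build with the compiled `_ext` ops available; the sizes are illustrative:

```python
import torch
from annotator.uniformer.mmcv.ops import PSAMask  # path inside this space

psa = PSAMask('collect', mask_size=(7, 7))
x = torch.randn(1, 7 * 7, 32, 32)  # channels must equal h_mask * w_mask
out = psa(x)                       # -> (1, 32 * 32, 32, 32)
print(out.shape)
```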
    diff --git a/spaces/PSMdata/langchain-llama2-7b-chat/app.py b/spaces/PSMdata/langchain-llama2-7b-chat/app.py
    deleted file mode 100644
    index cd31257a825ae4a198b723a192eed8e6357631b3..0000000000000000000000000000000000000000
    --- a/spaces/PSMdata/langchain-llama2-7b-chat/app.py
    +++ /dev/null
    @@ -1,554 +0,0 @@
    -"""Run codes."""
    -# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
    -# ruff: noqa: E501
    -import gc
    -import os
    -import platform
    -import random
    -import time
    -from collections import deque
    -from pathlib import Path
    -from threading import Thread
    -from typing import Any, Dict, List, Union
    -
    -# from types import SimpleNamespace
    -import gradio as gr
    -import psutil
    -from about_time import about_time
    -from ctransformers import Config
    -from dl_hf_model import dl_hf_model
    -from langchain.callbacks.base import BaseCallbackHandler
    -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    -from langchain.chains import ConversationChain
    -from langchain.chains.conversation.memory import ConversationBufferWindowMemory
    -
    -# from ctransformers import AutoModelForCausalLM
    -from langchain.llms import CTransformers
    -from langchain.prompts import PromptTemplate
    -from langchain.schema import LLMResult
    -from loguru import logger
    -
    -deq = deque()
    -sig_end = object()  # signals the processing is done
    -
    -# from langchain.llms import OpenAI
    -
    -filename_list = [
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
    -    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
    -]
    -
    -URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"  # 4.05G
    -
    -url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin"
    -url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin"  # 7.37G
    -# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"
    -url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"  # 6.93G
    -# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.binhttps://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin"  # 7.87G
    -
    -url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin"  # 7.37G
    -
    -_ = (
    -    "golay" in platform.node()
    -    or "okteto" in platform.node()
    -    or Path("/kaggle").exists()
    -    # or psutil.cpu_count(logical=False) < 4
    -    or 1  # run 7b in hf
    -)
    -
    -if _:
    -    # url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
    -    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"  # 2.87G
    -    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin"  # 2.87G
    -    url = "https://huggingface.co/TheBloke/llama2_7b_chat_uncensored-GGML/blob/main/llama2_7b_chat_uncensored.ggmlv3.q4_K_M.bin"  # 4.08G
    -
    -
    -prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
    -
    -### Instruction: {user_prompt}
    -
    -### Response:
    -"""
    -
    -prompt_template = """System: You are a helpful,
    -respectful and honest assistant. Always answer as
    -helpfully as possible, while being safe.  Your answers
    -should not include any harmful, unethical, racist,
    -sexist, toxic, dangerous, or illegal content. Please
    -ensure that your responses are socially unbiased and
    -positive in nature. If a question does not make any
    -sense, or is not factually coherent, explain why instead
    -of answering something not correct. If you don't know
    -the answer to a question, please don't share false
    -information.
    -User: {prompt}
    -Assistant: """
    -
    -prompt_template = """System: You are a helpful assistant.
    -User: {prompt}
    -Assistant: """
    -
    -prompt_template = """Question: {question}
    -Answer: Let's work this out in a step by step way to be sure we have the right answer."""
    -
    -prompt_template = """[INST] <>
    -You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
    -<>
    -
    -What NFL team won the Super Bowl in the year Justin Bieber was born?
    -[/INST]"""
    -
    -prompt_template = """[INST] <>
    -You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <>
    -
    -{question} [/INST]
    -"""
    -
    -prompt_template = """[INST] <>
    -You are a helpful assistant.
    -<>
    -
    -{question} [/INST]
    -"""
    -
    -prompt_template = """### HUMAN:
    -{question}
    -
    -### RESPONSE:"""
    -
    -prompt_template = """### HUMAN:
    -You are a helpful assistant. Think step by step.
    -{history}
    -{input}
    -### RESPONSE:"""
    -
    -prompt_template = """You are a helpful assistant. Let's think step by step.
    -{history}
    -### HUMAN:
    -{input}
    -### RESPONSE:"""
    -
-# PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:', template_format='f-string', validate_template=True)
    -
    -human_prefix = "### HUMAN"
    -ai_prefix = "### RESPONSE"
    -stop = [f"{human_prefix}:"]
    -
    -_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
    -stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
    -
    -# logger.debug(f"{stop_string=} not used")
    -
    -os.environ["TZ"] = "Asia/Shanghai"
    -try:
    -    time.tzset()  # type: ignore # pylint: disable=no-member
    -except Exception:
    -    # Windows
    -    logger.warning("Windows, cant run time.tzset()")
    -
    -
    -class DequeCallbackHandler(BaseCallbackHandler):
    -    """Mediate gradio and stream output."""
    -
    -    def __init__(self, deq_: deque):
    -        """Init deque for FIFO, may need to upgrade to queue.Queue or queue.SimpleQueue."""
    -        self.q = deq_
    -
    -    # def on_chat_model_start(self): self.q.clear()
    -
    -    def on_llm_start(
    -        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    -    ) -> None:
    -        """Run when LLM starts running. Clean the queue."""
    -        self.q.clear()
    -
    -    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
    -        """Run on new LLM token. Only available when streaming is enabled."""
    -        self.q.append(token)
    -
    -    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
    -        """Run when LLM ends running."""
    -        self.q.append(sig_end)
    -
    -    def on_llm_error(
    -        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    -    ) -> None:
    -        """Run when LLM errors."""
    -        self.q.append(sig_end)
    -
    -
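The handler above is one half of a producer/consumer pair: the LLM thread appends tokens to `deq` and the `sig_end` sentinel marks completion, while the UI thread drains the deque (see `bot()` below). The pattern in isolation, with a stand-in producer:

```python
import time
from collections import deque
from threading import Thread

q = deque()
done = object()  # sentinel, like sig_end above

def produce():
    for tok in ["Hello", ", ", "world", "!"]:
        q.append(tok)
        time.sleep(0.05)
    q.append(done)  # signal completion

Thread(target=produce).start()

pieces = []
while True:
    if q:
        item = q.popleft()
        if item is done:
            break
        pieces.append(item)
    else:
        time.sleep(0.01)  # idle poll, mirroring the loop in bot()
print("".join(pieces))
```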
    -_ = psutil.cpu_count(logical=False) - 1
    -cpu_count: int = int(_) if _ else 1
    -logger.debug(f"{cpu_count=}")
    -
    -LLM = None
    -gc.collect()
    -
    -try:
    -    model_loc, file_size = dl_hf_model(url)
    -except Exception as exc_:
    -    logger.error(exc_)
    -    raise SystemExit(1) from exc_
    -
    -config = Config()
    -# Config(top_k=40, top_p=0.95, temperature=0.8, repetition_penalty=1.1, last_n_tokens=64, seed=-1, batch_size=8, threads=-1, max_new_tokens=256, stop=None, stream=False, reset=True, context_length=-1, gpu_layers=0)
    -config.stream = True
    -config.stop = stop
    -config.threads = cpu_count
    -
    -deqcb = DequeCallbackHandler(deq)
    -
    -# LLM = AutoModelForCausalLM.from_pretrained(
    -LLM = CTransformers(
    -    model=model_loc,
    -    model_type="llama",
    -    callbacks=[StreamingStdOutCallbackHandler(), deqcb],
    -    # config=config,
    -    **vars(config),
    -)
    -
    -logger.info(f"done load llm {model_loc=} {file_size=}G")
    -
    -prompt = PromptTemplate(
    -    input_variables=["history", "input"],
    -    output_parser=None,
    -    partial_variables={},
    -    template=prompt_template,
    -    template_format="f-string",
    -    validate_template=True,
    -)
    -
    -memory = ConversationBufferWindowMemory(
    -    human_prefix=human_prefix,
    -    ai_prefix=ai_prefix,
    -)  # default k=5
    -
    -conversation = ConversationChain(
    -    llm=LLM,
    -    prompt=prompt,
    -    memory=memory,
    -    verbose=True,
    -)
    -logger.debug(f"{conversation.prompt.template=}")  # type: ignore
    -
    -# for api access ===
    -config = Config()
    -# Config(top_k=40, top_p=0.95, temperature=0.8, repetition_penalty=1.1, last_n_tokens=64, seed=-1, batch_size=8, threads=-1, max_new_tokens=256, stop=None, stream=False, reset=True, context_length=-1, gpu_layers=0)
    -config.stop = stop
    -config.threads = cpu_count
    -
    -try:
    -    LLM_api = CTransformers(
    -        model=model_loc,
    -        model_type="llama",
    -        # callbacks=[StreamingStdOutCallbackHandler(), deqcb],
    -        callbacks=[StreamingStdOutCallbackHandler()],
    -        **vars(config),
    -    )
    -    conversation_api = ConversationChain(
    -        llm=LLM_api,  #  need a separate LLM, or else deq may be messed up
    -        prompt=prompt,
    -        verbose=True,
    -    )
    -except Exception as exc_:
    -    logger.error(exc_)
    -    conversation_api = None
    -    logger.warning("Not able to instantiate conversation_api, api will not work")
    -
    -# conversation.predict(input="Hello, my name is Andrea")
    -
    -
    -def user(user_message, history):
    -    # return user_message, history + [[user_message, None]]
    -    history.append([user_message, None])
    -    return user_message, history  # keep user_message
    -
    -
    -def user1(user_message, history):
    -    # return user_message, history + [[user_message, None]]
    -    history.append([user_message, None])
    -    return "", history  # clear user_message
    -
    -
    -def bot_(history):
    -    user_message = history[-1][0]
    -    resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
    -    bot_message = user_message + ": " + resp
    -    history[-1][1] = ""
    -    for character in bot_message:
    -        history[-1][1] += character
    -        time.sleep(0.02)
    -        yield history
    -
    -    history[-1][1] = resp
    -    yield history
    -
    -
    -def bot(history):
    -    user_message = history[-1][0]
    -    response = []
    -
    -    logger.debug(f"{user_message=}")
    -
    -    # conversation.predict(input="What's my name?")
    -    thr = Thread(target=conversation.predict, kwargs={"input": user_message})
    -    thr.start()
    -
-    # process deq
    -    response = []
    -    flag = 1
    -    then = time.time()
    -    prefix = ""  # to please pyright
    -    with about_time() as atime:  # type: ignore
    -        while True:
    -            if deq:
    -                if flag:
    -                    prefix = f"({time.time() - then:.2f}s) "
    -                    flag = 0
    -                _ = deq.popleft()
    -                if _ is sig_end:
    -                    break
    -                # print(_, end='')
    -                response.append(_)
    -                history[-1][1] = prefix + "".join(response).strip()
    -                yield history
    -            else:
    -                time.sleep(0.01)
    -    _ = (
    -        f"(time elapsed: {atime.duration_human}, "  # type: ignore
    -        f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
    -    )
    -
    -    history[-1][1] = "".join(response) + f"\n{_}"
    -    yield history
    -
    -
    -def predict_api(user_prompt):
    -    if conversation_api is None:
    -        return "conversation_api is None, probably due to insufficient memory, api not usable"
    -
    -    logger.debug(f"api: {user_prompt=}")
    -    try:
    -        _ = """
    -        response = generate(
    -            prompt,
    -            config=config,
    -        )
    -        # """
    -        response = conversation_api.predict(input=user_prompt)
    -        logger.debug(f"api: {response=}")
    -    except Exception as exc:
    -        logger.error(exc)
    -        response = f"{exc=}"
    -    # bot = {"inputs": [response]}
    -    # bot = [(prompt, response)]
    -
    -    return response.strip()
    -
    -
    -css = """
    -    .importantButton {
    -        background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
    -        border: none !important;
    -    }
    -    .importantButton:hover {
    -        background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
    -        border: none !important;
    -    }
    -    .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
    -    .xsmall {font-size: x-small;}
    -"""
    -etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
    -examples_list = [
    -    ["Hello I am mike."],
    -    ["What's my name?"],
    -    ["What NFL team won the Super Bowl in the year Justin Bieber was born?"],
    -    [
    -        "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
    -    ],
    -    ["When was Justin Bieber born?"],
    -    ["What NFL team won the Super Bowl in 1994?"],
    -    ["How to pick a lock? Provide detailed steps."],
    -    [
    -        "If it takes 10 hours to dry 10 clothes,  assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"
    -    ],
    -    ["is infinity + 1 bigger than infinity?"],
    -    ["Explain the plot of Cinderella in a sentence."],
    -    [
    -        "How long does it take to become proficient in French, and what are the best methods for retaining information?"
    -    ],
    -    ["What are some common mistakes to avoid when writing code?"],
    -    ["Build a prompt to generate a beautiful portrait of a horse"],
    -    ["Suggest four metaphors to describe the benefits of AI"],
    -    ["Write a pop song about leaving home for the sandy beaches."],
    -    ["Write a pop song about having hot sex on a sandy beach."],
    -    ["Write a summary demonstrating my ability to tame lions"],
    -    ["鲁迅和周树人什么关系? 说中文。"],
    -    ["鲁迅和周树人什么关系?"],
    -    ["鲁迅和周树人什么关系? 用英文回答。"],
    -    ["从前有一头牛,这头牛后面有什么?"],
    -    ["正无穷大加一大于正无穷大吗?"],
    -    ["正无穷大加正无穷大大于正无穷大吗?"],
    -    ["-2的平方根等于什么?"],
    -    ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
    -    ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
    -    ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
    -    [f"{etext} 翻成中文,列出3个版本。"],
    -    [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"],
    -    ["假定 1 + 2 = 4, 试求 7 + 8。"],
    -    ["给出判断一个数是不是质数的 javascript 码。"],
    -    ["给出实现python 里 range(10)的 javascript 码。"],
    -    ["给出实现python 里 [*(range(10)]的 javascript 码。"],
    -    ["Erkläre die Handlung von Cinderella in einem Satz."],
    -    ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."],
    -]
    -
    -logger.info("start block")
    -
    -with gr.Blocks(
    -    title=f"{Path(model_loc).name}",
    -    theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
    -    css=css,
    -) as block:
    -    # buff_var = gr.State("")
    -    with gr.Accordion("🎈 Info", open=False):
    -        # gr.HTML(
    -        #     """
-        #     Duplicate and spin a CPU UPGRADE to avoid the queue
-        #     """
-        # )
-        gr.Markdown(
-            f"""{Path(model_loc).name}
    - The bot can conduct multi-turn conversations, i.e. it remembers past dialogs. The process time is longer. - It typically takes about 120 seconds for the first response to appear. - - Most examples are meant for another model. - You probably should try to test - some related prompts.""", - elem_classes="xsmall", - ) - - chatbot = gr.Chatbot(height=500) - - with gr.Row(): - with gr.Column(scale=5): - msg = gr.Textbox( - label="Chat Message Box", - placeholder="Ask me anything (press Shift+Enter or click Submit to send)", - show_label=False, - # container=False, - lines=6, - max_lines=30, - show_copy_button=True, - # ).style(container=False) - ) - with gr.Column(scale=1, min_width=50): - with gr.Row(): - submit = gr.Button("Submit", elem_classes="xsmall") - stop = gr.Button("Stop", visible=True) - clear = gr.Button("Clear History", visible=True) - with gr.Row(visible=False): - with gr.Accordion("Advanced Options:", open=False): - with gr.Row(): - with gr.Column(scale=2): - system = gr.Textbox( - label="System Prompt", - value=prompt_template, - show_label=False, - container=False, - # ).style(container=False) - ) - with gr.Column(): - with gr.Row(): - change = gr.Button("Change System Prompt") - reset = gr.Button("Reset System Prompt") - - with gr.Accordion("Example Inputs", open=True): - examples = gr.Examples( - examples=examples_list, - inputs=[msg], - examples_per_page=40, - ) - - with gr.Accordion("Disclaimer", open=False): - _ = Path(model_loc).name - gr.Markdown( - f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce " - "factually accurate information. {_} was trained on various public datasets; while great efforts " - "have been taken to clean the pretraining data, it is possible that this model could generate lewd, " - "biased, or otherwise offensive outputs.", - elem_classes=["disclaimer"], - ) - - msg_submit_event = msg.submit( - # fn=conversation.user_turn, - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - submit_click_event = submit.click( - # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg - fn=user1, # clear msg - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - # queue=False, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - stop.click( - fn=None, - inputs=None, - outputs=None, - cancels=[msg_submit_event, submit_click_event], - queue=False, - ) - - # TODO: clear conversation memory as well - clear.click(lambda: None, None, chatbot, queue=False) - - with gr.Accordion("For Chat/Translation API", open=False, visible=False): - input_text = gr.Text() - api_btn = gr.Button("Go", variant="primary") - out_text = gr.Text() - - if conversation_api is not None: - api_btn.click( - predict_api, - input_text, - out_text, - api_name="api", - ) - -# concurrency_count=5, max_size=20 -# max_size=36, concurrency_count=14 -# CPU cpu_count=2 16G, model 7G -# CPU UPGRADE cpu_count=8 32G, model 7G - -# does not work -_ = """ -# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1) -# concurrency_count = max(_, 1) -if psutil.cpu_count(logical=False) >= 8: - # concurrency_count = max(int(32 / file_size) - 1, 1) -else: - # concurrency_count = max(int(16 / file_size) - 1, 1) -# """ - -concurrency_count = 1 -logger.info(f"{concurrency_count=}") - -block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True) diff --git a/spaces/ParisNeo/Blip_QA/README.md 
b/spaces/ParisNeo/Blip_QA/README.md deleted file mode 100644 index 5d911ee00bbe3b9aaf28fe96520137b4ca551aa1..0000000000000000000000000000000000000000 --- a/spaces/ParisNeo/Blip_QA/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Blip QA -emoji: 🚀 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: mit ---- -BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation -This space shows how easy it is to use the BLIP model for image querrying. -[https://arxiv.org/abs/2201.12086](https://arxiv.org/abs/2201.12086) - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/page.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/page.go deleted file mode 100644 index cf3ab247ef5c2c773e4afac0c95f5cb72e585e66..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/page.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/pinecone.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/pinecone.py deleted file mode 100644 index 27fcd62482d0cf44e02fa1c339195be58cb745b0..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/pinecone.py +++ /dev/null @@ -1,75 +0,0 @@ -import pinecone -from colorama import Fore, Style - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.logs import logger -from autogpt.memory.base import MemoryProviderSingleton - - -class PineconeMemory(MemoryProviderSingleton): - def __init__(self, cfg): - pinecone_api_key = cfg.pinecone_api_key - pinecone_region = cfg.pinecone_region - pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) - dimension = 1536 - metric = "cosine" - pod_type = "p1" - table_name = "auto-gpt" - # this assumes we don't start with memory. - # for now this works. - # we'll need a more complicated and robust system if we want to start with - # memory. - self.vec_num = 0 - - try: - pinecone.whoami() - except Exception as e: - logger.typewriter_log( - "FAILED TO CONNECT TO PINECONE", - Fore.RED, - Style.BRIGHT + str(e) + Style.RESET_ALL, - ) - logger.double_check( - "Please ensure you have setup and configured Pinecone properly for use." - + f"You can check out {Fore.CYAN + Style.BRIGHT}" - "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" - f"{Style.RESET_ALL} to ensure you've set up everything correctly." - ) - exit(1) - - if table_name not in pinecone.list_indexes(): - pinecone.create_index( - table_name, dimension=dimension, metric=metric, pod_type=pod_type - ) - self.index = pinecone.Index(table_name) - - def add(self, data): - vector = create_embedding_with_ada(data) - # no metadata here. We may wish to change that long term. - self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) - _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" - self.vec_num += 1 - return _text - - def get(self, data): - return self.get_relevant(data, 1) - - def clear(self): - self.index.delete(deleteAll=True) - return "Obliviated" - - def get_relevant(self, data, num_relevant=5): - """ - Returns all the data in the memory that is relevant to the given data. - :param data: The data to compare to. - :param num_relevant: The number of relevant data to return. 
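The retrieval path in `get_relevant` above reduces to three steps: embed the query, score it against every stored vector, and return the texts behind the best matches. A minimal sketch of the same idea using plain NumPy in place of a Pinecone index (the `embed` helper below is a hypothetical stand-in, not the real `create_embedding_with_ada`):

```python
import numpy as np

def embed(text: str, dim: int = 1536) -> np.ndarray:
    # Hypothetical stand-in: any deterministic text -> unit-vector mapping.
    rng = np.random.default_rng(abs(hash(text)) % (2 ** 32))
    v = rng.normal(size=dim)
    return v / np.linalg.norm(v)

class ToyMemory:
    def __init__(self):
        self.vectors, self.texts = [], []

    def add(self, data: str) -> None:
        self.vectors.append(embed(data))
        self.texts.append(data)

    def get_relevant(self, query: str, num_relevant: int = 5) -> list:
        q = embed(query)
        # Dot product of unit vectors == cosine similarity.
        scores = np.array([v @ q for v in self.vectors])
        top = np.argsort(scores)[::-1][:num_relevant]
        return [self.texts[i] for i in top]

mem = ToyMemory()
for fact in ["the sky is blue", "grass is green", "pinecones grow on pine trees"]:
    mem.add(fact)
print(mem.get_relevant("what color is the sky?", num_relevant=2))
```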
Defaults to 5 - """ - query_embedding = create_embedding_with_ada(data) - results = self.index.query( - query_embedding, top_k=num_relevant, include_metadata=True - ) - sorted_results = sorted(results.matches, key=lambda x: x.score) - return [str(item["metadata"]["raw_text"]) for item in sorted_results] - - def get_stats(self): - return self.index.describe_index_stats() diff --git a/spaces/PeepDaSlan9/Llama-2-AWS/responses.py b/spaces/PeepDaSlan9/Llama-2-AWS/responses.py deleted file mode 100644 index 7ce341095a466dcec978291a834f973b82ce6753..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Llama-2-AWS/responses.py +++ /dev/null @@ -1,36 +0,0 @@ -# RESPONSE -import requests - -# Define the URL -url = "https://wcza44xtt6.execute-api.us-west-2.amazonaws.com/default/llama-osu" - - -def new_data(): - data = { - "inputs": [ - [ - ] - ], - "parameters": { - "max_new_tokens": 500, - "top_p": 0.9, # if you set top p to 0.9, the model will only consider the most likely words that make up 90% of the probability mass. - "temperature": 0.2 # creative level from 0 to 1 (the higher the more creative) - } - } - return data - - -data = new_data() -def get_response(prompt: str) -> str: - if(prompt.lower() == 'reset'): - global data - data = new_data() - return "You can start a new conversation" - else: - _dict = {"role": "user", "content": f"{prompt}" + " (Make your answer brief with several sentences)"} - data["inputs"][0].append(_dict) - response = requests.post(url, json=data) - response_dict = response.json()[0]['generation'] - data["inputs"][0].append(response_dict) - - return response.json()[0]['generation']['content'] \ No newline at end of file diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/arraymisc/quantization.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/arraymisc/quantization.py deleted file mode 100644 index 8e47a3545780cf071a1ef8195efb0b7b662c8186..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/arraymisc/quantization.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np - - -def quantize(arr, min_val, max_val, levels, dtype=np.int64): - """Quantize an array of (-inf, inf) to [0, levels-1]. - - Args: - arr (ndarray): Input array. - min_val (scalar): Minimum value to be clipped. - max_val (scalar): Maximum value to be clipped. - levels (int): Quantization levels. - dtype (np.type): The type of the quantized array. - - Returns: - tuple: Quantized array. - """ - if not (isinstance(levels, int) and levels > 1): - raise ValueError( - f'levels must be a positive integer, but got {levels}') - if min_val >= max_val: - raise ValueError( - f'min_val ({min_val}) must be smaller than max_val ({max_val})') - - arr = np.clip(arr, min_val, max_val) - min_val - quantized_arr = np.minimum( - np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1) - - return quantized_arr - - -def dequantize(arr, min_val, max_val, levels, dtype=np.float64): - """Dequantize an array. - - Args: - arr (ndarray): Input array. - min_val (scalar): Minimum value to be clipped. - max_val (scalar): Maximum value to be clipped. - levels (int): Quantization levels. - dtype (np.type): The type of the dequantized array. - - Returns: - tuple: Dequantized array. 
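Taken as a pair, `quantize` maps each clipped input to one of `levels` integer bins and `dequantize` maps a bin index back to that bin's center, so the round-trip error is bounded by half a bin width, (max_val - min_val) / (2 * levels). A short usage sketch, assuming both helpers from this module are in scope:

```python
import numpy as np

arr = np.array([-2.0, -0.3, 0.0, 0.7, 1.5])
q = quantize(arr, min_val=-1.0, max_val=1.0, levels=256)    # integers in [0, 255]
rec = dequantize(q, min_val=-1.0, max_val=1.0, levels=256)  # floats near the input

# Inputs are clipped to [min_val, max_val] first; recovery lands on bin centers.
half_bin = 2.0 / (2 * 256)
assert np.all(np.abs(np.clip(arr, -1.0, 1.0) - rec) <= half_bin + 1e-12)
```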
- """ - if not (isinstance(levels, int) and levels > 1): - raise ValueError( - f'levels must be a positive integer, but got {levels}') - if min_val >= max_val: - raise ValueError( - f'min_val ({min_val}) must be smaller than max_val ({max_val})') - - dequantized_arr = (arr + 0.5).astype(dtype) * (max_val - - min_val) / levels + min_val - - return dequantized_arr diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/__init__.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/__init__.py deleted file mode 100644 index d09caf9eb805f849a517f1b23503e1a4d6ea1ec5..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from taming.modules.losses.vqperceptual import DummyLoss - diff --git a/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index f9664fb1f89ef068e923211179e1c7e1ce7fdbd2..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,87 +0,0 @@ -import numpy as np -import pyworld - -from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return 
self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/queue.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/queue.py deleted file mode 100644 index 41784104ee4bd5796006d1052536325d52db1e8c..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/urllib3/util/queue.py +++ /dev/null @@ -1,22 +0,0 @@ -import collections - -from ..packages import six -from ..packages.six.moves import queue - -if six.PY2: - # Queue is imported for side effects on MS Windows. See issue #229. - import Queue as _unused_module_Queue # noqa: F401 - - -class LifoQueue(queue.Queue): - def _init(self, _): - self.queue = collections.deque() - - def _qsize(self, len=len): - return len(self.queue) - - def _put(self, item): - self.queue.append(item) - - def _get(self): - return self.queue.pop() diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_macos_compat.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_macos_compat.py deleted file mode 100644 index 17769e9154bd9cc3f3c00dc10718e4377828cb5e..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/_macos_compat.py +++ /dev/null @@ -1,12 +0,0 @@ -import sys -import importlib - - -def bypass_compiler_fixup(cmd, args): - return cmd - - -if sys.platform == 'darwin': - compiler_fixup = importlib.import_module('_osx_support').compiler_fixup -else: - compiler_fixup = bypass_compiler_fixup diff --git a/spaces/Reha2704/VToonify/vtoonify/model/encoder/encoders/__init__.py b/spaces/Reha2704/VToonify/vtoonify/model/encoder/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/evaluation/class_names.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/evaluation/class_names.py deleted file mode 100644 index c2487c2ee2d010c40db0e1c2b51c91b194e84dc7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/evaluation/class_names.py +++ /dev/null @@ -1,116 +0,0 @@ -import mmcv - - -def wider_face_classes(): - return ['face'] - - -def voc_classes(): - return [ - 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', - 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', - 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' - ] - - -def imagenet_det_classes(): - return [ - 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', - 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', - 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', - 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', - 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', - 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', - 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', - 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', - 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', - 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', - 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', - 'filing_cabinet', 'flower_pot', 'flute', 'fox', 
'french_horn', 'frog', - 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', - 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', - 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', - 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', - 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', - 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', - 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', - 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', - 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', - 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', - 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', - 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', - 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', - 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', - 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', - 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', - 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', - 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', - 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', - 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', - 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', - 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', - 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', - 'whale', 'wine_bottle', 'zebra' - ] - - -def imagenet_vid_classes(): - return [ - 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', - 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', - 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', - 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', - 'watercraft', 'whale', 'zebra' - ] - - -def coco_classes(): - return [ - 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', - 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', - 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', - 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', - 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', - 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', - 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', - 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', - 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', - 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', - 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', - 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' - ] - - -def cityscapes_classes(): - return [ - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle' - ] - - -dataset_aliases = { - 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'], - 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'], - 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'], - 'coco': ['coco', 'mscoco', 'ms_coco'], - 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'], - 'cityscapes': ['cityscapes'] -} - - -def get_classes(dataset): - """Get class names of a dataset.""" - alias2name = {} - for name, aliases in dataset_aliases.items(): - for alias in aliases: - alias2name[alias] = 
name - - if mmcv.is_str(dataset): - if dataset in alias2name: - labels = eval(alias2name[dataset] + '_classes()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise TypeError(f'dataset must a str, but got {type(dataset)}') - return labels diff --git a/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/constraints.md b/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/constraints.md deleted file mode 100644 index 6bddcd1754c9ce6eb8233d6664851c14752f38b1..0000000000000000000000000000000000000000 --- a/spaces/SHIBATAATSUSHI/aioccupationaltherapist2/constraints.md +++ /dev/null @@ -1,7 +0,0 @@ -# 制約 - -- あなたは人の身体を作業療法で治療することを生きがいとした作業療法AIアシスタントです -- あなたはuserを作業療法することしか考えてないので、どんな話も作業療法で行う作業療法のプログラムを提供することで切り替えします -- あなたの返信は下記テンプレートに沿って生成します - -# テンプレート diff --git a/spaces/Sa-m/Vehicles-Detection-Custom-YoloV7/app.py b/spaces/Sa-m/Vehicles-Detection-Custom-YoloV7/app.py deleted file mode 100644 index 8be9d09f5392238346ae765c688ea50da2aadd00..0000000000000000000000000000000000000000 --- a/spaces/Sa-m/Vehicles-Detection-Custom-YoloV7/app.py +++ /dev/null @@ -1,171 +0,0 @@ -import torch -import argparse -import gradio as gr -from PIL import Image -from numpy import random -from pathlib import Path -import os -import time -import torch.backends.cudnn as cudnn -from models.experimental import attempt_load -import cv2 -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier,scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path -from utils.plots import plot_one_box -from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel - - -os.system('git clone https://github.com/WongKinYiu/yolov7') -os.system('wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt') -#os.system('wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-mask.pt') - -#model='best' -def Custom_detect(img,mode): - if mode=='Custom-Detection': - model='best' - #if mode=='Instance-Segmentation': - # model='yolov7-mask' - if mode=='Yolov7-model-detection': - model='yolov7' - - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=model+".pt", help='model.pt path(s)') - parser.add_argument('--source', type=str, default='Temp_files/', help='source') - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='display results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default='runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--trace', action='store_true', help='trace model') - opt = parser.parse_args() - img.save("Temp_files/test.jpg") - source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.trace - save_img = True - webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - save_dir = Path(increment_path(Path(opt.project)/opt.name,exist_ok=opt.exist_ok)) - - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) - set_logging() - device = select_device(opt.device) - half = device.type != 'cpu' - model = attempt_load(weights, map_location=device) - stride = int(model.stride.max()) - imgsz = check_img_size(imgsz, s=stride) - if trace: - model = TracedModel(model, device, opt.img_size) - if half: - model.half() - - classify = False - if classify: - modelc = load_classifier(name='resnet101', n=2) # initialize - modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() - vid_path, vid_writer = None, None - if webcam: - view_img = check_imshow() - cudnn.benchmark = True - dataset = LoadStreams(source, img_size=imgsz, stride=stride) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride) - names = model.module.names if hasattr(model, 'module') else model.names - colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) - t0 = time.time() - for path, img, im0s, vid_cap in dataset: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() - img /= 255.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) - - # Inference - t1 = time_synchronized() - pred = model(img, augment=opt.augment)[0] - - pred = non_max_suppression(pred,opt.conf_thres,opt.iou_thres,classes=opt.classes, agnostic=opt.agnostic_nms) - t2 = time_synchronized() - - - # Apply Classifier - if classify: - pred = apply_classifier(pred, modelc, img, im0s) - - for i, det in enumerate(pred): - if webcam: - p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count - else: - p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) - - p = Path(p) - save_path = str(save_dir / p.name) - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt - s += '%gx%g ' % img.shape[2:] - gn = 
torch.tensor(im0.shape)[[1, 0, 1, 0]] - if len(det): - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() - - - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " - - - for *xyxy, conf, cls in reversed(det): - if save_txt: - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() - line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) - with open(txt_path + '.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or view_img: - label = f'{names[int(cls)]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2) - if view_img: - cv2.imshow(str(p), im0) - cv2.waitKey(1) - - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: - if vid_path != save_path: - vid_path = save_path - if isinstance(vid_writer, cv2.VideoWriter): - vid_writer.release() - if vid_cap: - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer.write(im0) - - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - - print(f'Done. ({time.time() - t0:.3f}s)') - - return Image.fromarray(im0[:,:,::-1]) -inp = gr.Image(type="pil") -#"Custom-Detection","Yolov7-model-detection" -inp2= gr.Dropdown(choices=['Custom-Detection','Yolov7-model-detection']) -output = gr.Image(type="pil") - -examples=[["Examples/Image1.jpg","Image1"],["Examples/Image14.jpg","Image14"],["Examples/Image32.jpg","Image32"]] - -io=gr.Interface(fn=Custom_detect, inputs=[inp,inp2], outputs=output, title='Vehicle Detection With Custom YOLOv7') -io.launch() - diff --git a/spaces/Salesforce/EDICT/my_half_diffusers/utils/outputs.py b/spaces/Salesforce/EDICT/my_half_diffusers/utils/outputs.py deleted file mode 100644 index b02f62d02d0322401fd9926aca9f792a4696cc1e..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_half_diffusers/utils/outputs.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Generic utilities -""" - -import warnings -from collections import OrderedDict -from dataclasses import fields -from typing import Any, Tuple - -import numpy as np - -from .import_utils import is_torch_available - - -def is_tensor(x): - """ - Tests if `x` is a `torch.Tensor` or `np.ndarray`. - """ - if is_torch_available(): - import torch - - if isinstance(x, torch.Tensor): - return True - - return isinstance(x, np.ndarray) - - -class BaseOutput(OrderedDict): - """ - Base class for all model outputs as dataclass. 
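In practice, a subclass is declared as a dataclass; after `__post_init__` runs, the instance behaves as an ordered mapping that silently drops `None` fields while still supporting attribute, key, and integer access. A sketch with a made-up subclass (`ToyPipelineOutput` is illustrative, not a class that ships with the library):

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class ToyPipelineOutput(BaseOutput):
    images: Optional[List[int]] = None
    extra: Optional[str] = None

out = ToyPipelineOutput(images=[1, 2, 3])
assert out.images == [1, 2, 3]         # attribute access
assert out["images"] == [1, 2, 3]      # dict-style access
assert out[0] == [1, 2, 3]             # integer access goes through to_tuple()
assert "extra" not in out              # None fields never enter the mapping
assert out.to_tuple() == ([1, 2, 3],)
```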
Has a `__getitem__` that allows indexing by integer or slice (like a - tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular - python dictionary. - - - - You can't unpack a `BaseOutput` directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple - before. - - - """ - - def __post_init__(self): - class_fields = fields(self) - - # Safety and consistency checks - if not len(class_fields): - raise ValueError(f"{self.__class__.__name__} has no fields.") - - for field in class_fields: - v = getattr(self, field.name) - if v is not None: - self[field.name] = v - - def __delitem__(self, *args, **kwargs): - raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") - - def setdefault(self, *args, **kwargs): - raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") - - def pop(self, *args, **kwargs): - raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") - - def update(self, *args, **kwargs): - raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") - - def __getitem__(self, k): - if isinstance(k, str): - inner_dict = {k: v for (k, v) in self.items()} - if self.__class__.__name__ in ["StableDiffusionPipelineOutput", "ImagePipelineOutput"] and k == "sample": - warnings.warn( - "The keyword 'samples' is deprecated and will be removed in version 0.4.0. Please use `.images` or" - " `'images'` instead.", - DeprecationWarning, - ) - return inner_dict["images"] - return inner_dict[k] - else: - return self.to_tuple()[k] - - def __setattr__(self, name, value): - if name in self.keys() and value is not None: - # Don't call self.__setitem__ to avoid recursion errors - super().__setitem__(name, value) - super().__setattr__(name, value) - - def __setitem__(self, key, value): - # Will raise a KeyException if needed - super().__setitem__(key, value) - # Don't call self.__setattr__ to avoid recursion errors - super().__setattr__(key, value) - - def to_tuple(self) -> Tuple[Any]: - """ - Convert self to a tuple containing all the attributes/keys that are not `None`. - """ - return tuple(self[k] for k in self.keys()) diff --git a/spaces/Sandiago21/automatic-speech-recognition-german/app.py b/spaces/Sandiago21/automatic-speech-recognition-german/app.py deleted file mode 100644 index e89a3dcb038f094b73348ebb733135d61272b63f..0000000000000000000000000000000000000000 --- a/spaces/Sandiago21/automatic-speech-recognition-german/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch -import gradio as gr -from transformers import pipeline - -model_id = "Sandiago21/whisper-large-v2-german-2" # update with your model id -pipe = pipeline("automatic-speech-recognition", model=model_id) - - -title = "Automatic Speech Recognition (ASR)" -description = """ -Demo for automatic speech recognition in German. 
Demo uses the [Sandiago21/whisper-large-v2-german-2](https://huggingface.co/Sandiago21/whisper-large-v2-german-2) checkpoint, which is based on OpenAI's -[Whisper](https://huggingface.co/openai/whisper-large-v2) model and is fine-tuned on a German audio dataset. -![Automatic Speech Recognition (ASR)](https://datasets-server.huggingface.co/assets/huggingface-course/audio-course-images/--/huggingface-course--audio-course-images/train/2/image/image.png "Diagram of Automatic Speech Recognition (ASR)") -""" - -def transcribe_speech(filepath): - output = pipe( - filepath, - max_new_tokens=256, - generate_kwargs={ - "task": "transcribe", - "language": "german", - }, # update with the language you've fine-tuned on - chunk_length_s=30, - batch_size=8, - ) - return output["text"] - -demo = gr.Blocks() - -mic_transcribe = gr.Interface( - fn=transcribe_speech, - inputs=gr.Audio(source="microphone", type="filepath"), - outputs=gr.outputs.Textbox(), - title=title, - description=description, -) - -file_transcribe = gr.Interface( - fn=transcribe_speech, - inputs=gr.Audio(source="upload", type="filepath"), - outputs=gr.outputs.Textbox(), - examples=[["./example.wav"]], - title=title, - description=description, -) - -with demo: - gr.TabbedInterface( - [mic_transcribe, file_transcribe], - ["Transcribe Microphone", "Transcribe Audio File"], - ) - -demo.launch() diff --git a/spaces/Sangamesh/Cat_Dog_Classifier/app.py b/spaces/Sangamesh/Cat_Dog_Classifier/app.py deleted file mode 100644 index 98a765572f722e9c82491a368eadc4a24e496ce7..0000000000000000000000000000000000000000 --- a/spaces/Sangamesh/Cat_Dog_Classifier/app.py +++ /dev/null @@ -1,56 +0,0 @@ -#|default_exp app - -#cell -import numpy as np - -#cell -from fastai.vision.all import * - -#cell -#|export -from fastai.learner import load_learner -from PIL import Image -import gradio as gr - -def is_cat(x): return x[0].isupper() - -#cell -import platform -import pathlib -plt = platform.system() -if plt == 'Windows': pathlib.PosixPath = pathlib.WindowsPath - - -#cell -im = PILImage.create('dog.jpg') -im.thumbnail((192,192)) -im - - -#cell -#|export -learn = load_learner('model.pkl') - -#cell -learn.predict(im) - -#cell -#|export -categories = ('Dog','Cat') - -def classify_images(img): - pred,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -#cell -classify_images(im) - -#cell -#|export -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() - -intf = gr.Interface(fn=classify_images, inputs=image, outputs=label) -intf.launch(inline=False) - -#cell -m = learn.model \ No newline at end of file diff --git a/spaces/Silentlin/DiffSinger/vocoders/__init__.py b/spaces/Silentlin/DiffSinger/vocoders/__init__.py deleted file mode 100644 index 66c318857ce48048437dede7072901ad6471b8fc..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/vocoders/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from vocoders import hifigan diff --git a/spaces/SmartPy/ScisummNet/app.py b/spaces/SmartPy/ScisummNet/app.py deleted file mode 100644 index 6ce0876f3cd6a69f80688a40525ecb60bc942001..0000000000000000000000000000000000000000 --- a/spaces/SmartPy/ScisummNet/app.py +++ /dev/null @@ -1,281 +0,0 @@ -import os -import contextlib -import logging -import random -import re -import time -from pathlib import Path - -import gradio as gr -import nltk -from cleantext import clean - -from summarize import load_model_and_tokenizer, summarize_via_tokenbatches -from utils import load_example_filenames, truncate_word_count, saves_summary -from textrank
import get_summary - -example_path = "./" -nltk.download("stopwords") - -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" -) - - -def proc_submission( - input_text: str, - model_size: str, - num_beams, - token_batch_length, - length_penalty, - repetition_penalty, - no_repeat_ngram_size, - max_input_length: int = 1024, -): - - settings = { - "length_penalty": float(length_penalty), - "repetition_penalty": float(repetition_penalty), - "no_repeat_ngram_size": int(no_repeat_ngram_size), - "encoder_no_repeat_ngram_size": 4, - "num_beams": int(num_beams), - "min_length": 4, - "max_length": int(token_batch_length // 4), - "early_stopping": True, - "do_sample": False, - } - st = time.perf_counter() - history = {} - clean_text = clean(input_text, lower=False) - max_input_length = 1024 if "base" in model_size.lower() else max_input_length - clean_text = get_summary(clean_text) - processed = truncate_word_count(clean_text, max_input_length) - - if processed["was_truncated"]: - tr_in = processed["truncated_text"] - # create elaborate HTML warning - input_wc = re.split(r"\s+", input_text) - msg = f""" -
<div style="background-color: #FFA500; color: white; padding: 20px;"> - <h3>Warning</h3> - <p>Input text was truncated to {max_input_length} words. That's about {100*max_input_length/len(input_wc):.2f}% of the submission.</p> - </div>
    - """ - logging.warning(msg) - history["WARNING"] = msg - else: - tr_in = input_text - msg = None - - if len(input_text) < 50: - # this is essentially a different case from the above - msg = f""" -
<div style="background-color: #880808; color: white; padding: 20px;"> - <h3>Warning</h3> - <p>Input text is too short to summarize. Detected {len(input_text)} characters. - Please load text by selecting an example from the dropdown menu or by pasting text into the text box.</p> - </div>
    - """ - logging.warning(msg) - logging.warning("RETURNING EMPTY STRING") - history["WARNING"] = msg - - return msg, "", [] - - _summaries = summarize_via_tokenbatches( - tr_in, - model, - tokenizer, - batch_length=token_batch_length, - **settings, - ) - sum_text = [f"Section {i}: " + s["summary"][0] for i, s in enumerate(_summaries)] - sum_scores = [ - f" - Section {i}: {round(s['summary_score'],4)}" - for i, s in enumerate(_summaries) - ] - - sum_text_out = "\n".join(sum_text) - history["Summary Scores"] = "

    " - scores_out = "\n".join(sum_scores) - rt = round((time.perf_counter() - st) / 60, 2) - print(f"Runtime: {rt} minutes") - html = "" - html += f"

    Runtime: {rt} minutes on CPU

    " - if msg is not None: - html += msg - - html += "" - - # save to file - saved_file = saves_summary(_summaries) - - return html, sum_text_out, scores_out, saved_file - - -def load_single_example_text( - example_path: str or Path="./example.txt", - max_pages=20, -): - """ - load_single_example - a helper function for the gradio module to load examples - Returns: - list of str, the examples - """ - global name_to_path - full_ex_path = name_to_path[example_path] - full_ex_path = Path(full_ex_path) - - if full_ex_path.suffix == ".txt": - with open(full_ex_path, "r", encoding="utf-8", errors="ignore") as f: - raw_text = f.read() - text = clean(raw_text, lower=False) - else: - logging.error(f"Unknown file type {full_ex_path.suffix}") - text = "ERROR - check example path" - - return text - -if __name__ == "__main__": - logging.info("Starting app instance") - os.environ[ - "TOKENIZERS_PARALLELISM" - ] = "false" # parallelism on tokenizers is buggy with gradio - logging.info("Loading summ models") - with contextlib.redirect_stdout(None): - model, tokenizer = load_model_and_tokenizer( - "SmartPy/bart-large-cnn-finetuned-scientific_summarize" - ) - - name_to_path = load_example_filenames(example_path) - logging.info(f"Loaded {len(name_to_path)} examples") - demo = gr.Blocks() - _examples = list(name_to_path.keys()) - with demo: - - gr.Markdown("# Document Summarization with Long-Document Transformers") - gr.Markdown( - "This is an example use case for fine-tuned long document transformers. The model is trained on Scientific Article summaries (via the Yale Scientific Article Summarization Dataset). The models in this demo are [Bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn)." - ) - with gr.Column(): - - gr.Markdown("## Load Inputs & Select Parameters") - gr.Markdown( - "Enter text below in the text area. The text will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). " - ) - with gr.Row(variant="compact"): - with gr.Column(scale=0.5, variant="compact"): - - model_size = gr.Radio( - choices=["bart-large-cnn"], - label="Model Variant", - value="bart-large-cnn", - ) - num_beams = gr.Radio( - choices=[2, 3, 4], - label="Beam Search: # of Beams", - value=2, - ) - with gr.Column(variant="compact"): - example_name = gr.Dropdown( - _examples, - label="Examples", - value=random.choice(_examples), - ) - - with gr.Row(): - input_text = gr.Textbox( - lines=4, - label="Input Text (for summarization)", - placeholder="Enter text to summarize, the text will be cleaned and truncated on Spaces. Narrative, academic (both papers and lecture transcription), and article text work well. May take a bit to generate depending on the input text :)", - ) - with gr.Column(min_width=100, scale=0.5): - load_examples_button = gr.Button( - "Load Example", - ) - - with gr.Column(): - gr.Markdown("## Generate Summary") - gr.Markdown( - "Summarization should take ~1-2 minutes for most settings, but may extend up to 5-10 minutes in some scenarios." - ) - summarize_button = gr.Button( - "Summarize!", - variant="primary", - ) - - output_text = gr.HTML("

<p><em>Output will appear below:</em></p>
    ") - gr.Markdown("### Summary Output") - summary_text = gr.Textbox( - label="Summary", placeholder="The generated summary will appear here" - ) - gr.Markdown( - "The summary scores can be thought of as representing the quality of the summary. less-negative numbers (closer to 0) are better:" - ) - summary_scores = gr.Textbox( - label="Summary Scores", placeholder="Summary scores will appear here" - ) - - text_file = gr.File( - label="Download Summary as Text File", - file_count="single", - type="file", - interactive=False, - ) - - gr.Markdown("---") - with gr.Column(): - gr.Markdown("### Advanced Settings") - with gr.Row(variant="compact"): - length_penalty = gr.inputs.Slider( - minimum=0.5, - maximum=1.0, - label="length penalty", - default=0.7, - step=0.05, - ) - token_batch_length = gr.Radio( - choices=[512, 768, 1024, 1536], - label="token batch length", - value=1024, - ) - - with gr.Row(variant="compact"): - repetition_penalty = gr.inputs.Slider( - minimum=1.0, - maximum=5.0, - label="repetition penalty", - default=3.5, - step=0.1, - ) - no_repeat_ngram_size = gr.Radio( - choices=[2, 3, 4], - label="no repeat ngram size", - value=3, - ) - with gr.Column(): - gr.Markdown("### About the Model") - gr.Markdown( - "These models are fine-tuned on the [1000 most cited papers in the ACL Anthology Network (AAN)](http://arxiv.org/pdf/1909.01716.pdf).The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage." - ) - gr.Markdown("---") - - load_examples_button.click( - fn=load_single_example_text, inputs=[example_name], outputs=[input_text] - ) - - summarize_button.click( - fn=proc_submission, - inputs=[ - input_text, - model_size, - num_beams, - token_batch_length, - length_penalty, - repetition_penalty, - no_repeat_ngram_size, - ], - outputs=[output_text, summary_text, summary_scores, text_file], - ) - - demo.launch(enable_queue=True, debug=True) diff --git a/spaces/StatsByZach/app/on_ice_xg_rates.py b/spaces/StatsByZach/app/on_ice_xg_rates.py deleted file mode 100644 index f9062140fcc58dd2ee721542df3779840ee9149d..0000000000000000000000000000000000000000 --- a/spaces/StatsByZach/app/on_ice_xg_rates.py +++ /dev/null @@ -1,282 +0,0 @@ -##### on_ice_xg_rates.py ##### -# A program to display skaters on-ice xG rates -# Zach Andrews - -# Import modules -from shiny import * -import shinyswatch -import plotly.graph_objs as go -from shinywidgets import output_widget, register_widget, render_widget -import pandas as pd -import plotly.express as px -from configure import base_url - -path = "data/on_ice_xg.csv" - -df = pd.read_csv(path) - -def server(input, output, session): - @output - @render.table - def table(): - df = pd.read_csv(path) - if input.z() == "T": - asc = True - else: - asc = False - if input.strength()=="even": - df = df[(df['Team']==input.x())&(df['EV_TOI']>=input.toi())] - if input.y() == "xGF/60": - df = df[['Player','EV_TOI','EV_xGF/60','EV_xGA/60']].sort_values(by='EV_xGF/60',ascending=asc).round(3) - elif input.y() == "xGA/60": - df = df[['Player','EV_TOI','EV_xGF/60','EV_xGA/60']].sort_values(by='EV_xGA/60',ascending=asc).round(3) - elif input.y() == 'TOI': - df = df[['Player','EV_TOI','EV_xGF/60','EV_xGA/60']].sort_values(by='EV_TOI',ascending=asc).round(3) - else: - df = df[['Player','EV_TOI','EV_xGF/60','EV_xGA/60']].sort_values(by=input.y(),ascending=asc).round(3) - elif input.strength()=="_5v5": - df = df[(df['Team']==input.x())&(df['5v5_TOI']>=input.toi())] - if input.y() == "xGF/60": - df = 
df[['Player','5v5_TOI','5v5_xGF/60','5v5_xGA/60']].sort_values(by='5v5_xGF/60',ascending=asc).round(3) - elif input.y() == "xGA/60": - df = df[['Player','5v5_TOI','5v5_xGF/60','5v5_xGA/60']].sort_values(by='5v5_xGA/60',ascending=asc).round(3) - elif input.y() == 'TOI': - df = df[['Player','5v5_TOI','5v5_xGF/60','5v5_xGA/60']].sort_values(by='5v5_TOI',ascending=asc).round(3) - else: - df = df[['Player','5v5_TOI','5v5_xGF/60','5v5_xGA/60']].sort_values(by=input.y(),ascending=asc).round(3) - else: - df = df[(df['Team']==input.x())&(df['ALL_TOI']>=input.toi())] - if input.y() == "xGF/60": - df = df[['Player','ALL_TOI','ALL_xGF/60','ALL_xGA/60']].sort_values(by='ALL_xGF/60',ascending=asc).round(3) - elif input.y() == "xGA/60": - df = df[['Player','ALL_TOI','ALL_xGF/60','ALL_xGA/60']].sort_values(by='ALL_xGA/60',ascending=asc).round(3) - elif input.y() == 'TOI': - df = df[['Player','ALL_TOI','ALL_xGF/60','ALL_xGA/60']].sort_values(by='ALL_TOI',ascending=asc).round(3) - else: - df = df[['Player','ALL_TOI','ALL_xGF/60','ALL_xGA/60']].sort_values(by=input.y(),ascending=asc).round(3) - return df - - - - @output - @render_widget - def my_widget(): - df = pd.read_csv(path) - team = input.x() - if input.strength()=="even": - title_strength = "Even Strength" - title_toi = "EV" - x_col = "EV_xGF/60" - y_col = "EV_xGA/60" - x_title = "Even Strength xGF/60" - y_title = "Even Strength xGA/60" - color_for_chart = "EV_TOI" - data = df[(df['Team']==team)&(df['EV_TOI']>=input.toi())] - elif input.strength()=="_5v5": - title_strength="5v5" - title_toi="5v5" - x_col = "5v5_xGF/60" - y_col = "5v5_xGA/60" - x_title = "5v5 xGF/60" - y_title = "5v5 xGA/60" - color_for_chart="5v5_TOI" - data = df[(df['Team']==team)&(df['5v5_TOI']>=input.toi())] - else: - title_strength="All Situation" - title_toi="All" - x_col = "ALL_xGF/60" - y_col = "ALL_xGA/60" - x_title = "All Situation xGF/60" - y_title = "All Situation xGA/60" - color_for_chart="ALL_TOI" - data = df[(df['Team']==team)&(df['ALL_TOI']>=input.toi())] - fig = px.scatter(data, x_col, y_col,color=color_for_chart,template="plotly_dark",height=1050,width=1050,text='Player') - fig.update_traces(textposition='top right',marker=dict(size=10)) - fig.update(layout_xaxis_range = [.01,6]) - fig.update(layout_yaxis_range = [6,.01]) - fig.update_layout(xaxis_showgrid=False, yaxis_showgrid=False,plot_bgcolor="#222222",paper_bgcolor="#222222") - fig.update_layout( - title=(team + " Skaters "+title_strength+" On-Ice xG Rates
    "+ - "2023-24 NHL Regular Season
    " + - "Minimum "+ str(input.toi()) + " " + title_toi + " TOI"), - margin=dict(r=20, l=40, b=100, t=90), - template='plotly_dark') - fig.add_annotation( - text = ("Data: @StatsByZach on Twitter") - , showarrow=False - , x = .80 - , y = -.045 - , xref='paper' - , yref='paper' - , xanchor='left' - , yanchor='bottom' - , xshift=-1 - , yshift=-5 - , font=dict(size=11, color="white") - , align="left" - ) - fig.update_layout(xaxis_title=x_title) - fig.update_layout(yaxis_title=y_title) - return fig - - @reactive.Effect - def _(): - val = input.quant() - - if input.strength()=="even": - calc = "EV_TOI" - elif input.strength()=="_5v5": - calc = "5v5_TOI" - else: - calc = "ALL_TOI" - - if val == "_25": - q= round(df[calc].quantile(.25),1) - elif val == "_50": - q= round(df[calc].quantile(.5),1) - elif val == "_75": - q=round(df[calc].quantile(.75),1) - else: - q=0 - ui.update_slider( - "toi", value=q - ) - - @reactive.Effect - def _2(): - btn = input.btn() - if btn % 2 == 1: - tab = ui.output_table("table") - ui.insert_ui( - ui.div({"id": "inserted-slider"},ui.tags.h5("Sort Table by", class_="app-heading"),ui.input_select("y","",{"Player":"Player","TOI":"TOI","xGF/60":"xGF/60","xGA/60":"xGA/60"}), - ui.input_radio_buttons( - "z", "", {"F": "High to Low", "T": "Low to High"} - ),ui.output_table("table")), - selector="#main-content", - where="beforeEnd", - ) - elif btn > 0: - ui.remove_ui("#inserted-slider") - -on_ice_xg = App(ui.page_fluid( - ui.tags.base(href=base_url), - ui.tags.div( - {"style": "width:75%;margin: 0 auto"}, - ui.tags.style( - """ - h4 { - margin-top: 1em;font-size:35px; - } - h2{ - font-size:25px; - } - """ - ), - shinyswatch.theme.darkly(), - ui.tags.h4("Stats By Zach"), - ui.tags.i("A website for hockey analytics"), - ui.navset_tab( - ui.nav_control( - ui.a( - "Home", - href="home/" - ), - ), - ui.nav_menu( - "Skater Charts", - ui.nav_control( - ui.a( - "On-Ice xG Rates", - href="skater-xg-rates/" - ), - ui.a( - "On-Ice xGF%", - href="skater-xg-percentages/" - ), - ), - ), - ui.nav_menu( - "Goalie Charts", - ui.nav_control( - ui.a( - "GSAx Timeline", - href="gsax-timeline/" - ), - ui.a( - "GSAx Leaderboard", - href="gsax-leaderboard/" - ), - ui.a( - "GSAx Comparison", - href="gsax-comparison/" - ) - ), - ),ui.nav_menu( - "Team Charts", - ui.nav_control( - ui.a( - "Team xG Rates", - href="team-xg-rates/" - ), - ), - ),ui.nav_control( - ui.a( - "Games", - href="games/" - ), - ),ui.nav_control( - ui.a( - "About", - href="about/" - ), - ) - ),ui.row( - ui.column(3,ui.tags.br(),ui.tags.h2("On-Ice xG Rates"),ui.tags.h5("Team", class_="app-heading"), - ui.input_select("x", "", { "ANA": "Anaheim Ducks", - "ARI": "Arizona Coyotes", - "BOS": "Boston Bruins", - "BUF": "Buffalo Sabres", - "CGY": "Calgary Flames", - "CAR": "Carolina Hurricanes", - "CHI": "Chicago Blackhawks", - "COL": "Colorado Avalanche", - "CBJ": "Columbus Blue Jackets", - "DAL": "Dallas Stars", - "DET": "Detroit Red Wings", - "EDM": "Edmonton Oilers", - "FLA": "Florida Panthers", - "L.A": "Los Angeles Kings", - "MIN": "Minnesota Wild", - "MTL": "Montreal Canadiens", - "NSH": "Nashville Predators", - "N.J": "New Jersey Devils", - "NYI": "New York Islanders", - "NYR": "New York Rangers", - "OTT": "Ottawa Senators", - "PHI": "Philadelphia Flyers", - "PIT": "Pittsburgh Penguins", - "S.J": "San Jose Sharks", - "SEA":"Seattle Kraken", - "STL": "St. 
Louis Blues", - "T.B": "Tampa Bay Lightning", - "TOR": "Toronto Maple Leafs", - "VAN": "Vancouver Canucks", - "VGK": "Vegas Golden Knights", - "WSH": "Washington Capitals", - "WPG": "Winnipeg Jets"}),ui.tags.h5("Strength", class_="app-heading"),ui.input_select("strength", "",{'even':"Even",'_5v5':"5v5",'All':"All Situations"}), - ui.tags.h5("Minimum TOI", class_="app-heading"), - ui.input_slider("toi", "", min=0, max=round(df['ALL_TOI'].max(),0), value=round(df['EV_TOI'].quantile(.25),1)), - ui.tags.h5("TOI Percentile (Among All NHL Skaters)",class_="app-heading"), - ui.input_radio_buttons( - "quant", - "", - { - "_25": "Top 75%", - "_50": "Top 50%", - "_75": "Top 25%", - }, - ), ui.input_action_button("btn", "Toggle Table"),ui.div({"id":"main-content"}), - #ui.output_table("table") - ), - ui.column(9,output_widget("my_widget"),title="Stats By Zach", - )))),server) diff --git a/spaces/Sumit7864/Image-Enhancer/realesrgan/models/realesrgan_model.py b/spaces/Sumit7864/Image-Enhancer/realesrgan/models/realesrgan_model.py deleted file mode 100644 index c298a09c42433177f90001a0a31d029576072ccd..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/realesrgan/models/realesrgan_model.py +++ /dev/null @@ -1,258 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.srgan_model import SRGANModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from collections import OrderedDict -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRGANModel(SRGANModel): - """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks with GAN training. - """ - - def __init__(self, opt): - super(RealESRGANModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. 
- """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add two-order degradations to obtain LQ images. - """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt_usm, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob2'] - 
if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. - if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, - self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue - self.gt_usm = self.usm_sharpener(self.gt) - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True - - def optimize_parameters(self, current_iter): - # usm sharpening - l1_gt = self.gt_usm - percep_gt = self.gt_usm - gan_gt = self.gt_usm - if self.opt['l1_gt_usm'] is False: - l1_gt = self.gt - if self.opt['percep_gt_usm'] is False: - percep_gt = self.gt - if self.opt['gan_gt_usm'] is False: - gan_gt = self.gt - - # optimize net_g - for p in self.net_d.parameters(): - p.requires_grad = False - - self.optimizer_g.zero_grad() - self.output = self.net_g(self.lq) - - l_g_total = 0 - loss_dict = OrderedDict() - if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): - # pixel loss - if self.cri_pix: - l_g_pix = self.cri_pix(self.output, l1_gt) - l_g_total += l_g_pix - loss_dict['l_g_pix'] = l_g_pix - # perceptual loss - if self.cri_perceptual: - l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt) - if l_g_percep is not None: - l_g_total += l_g_percep - loss_dict['l_g_percep'] = 
l_g_percep - if l_g_style is not None: - l_g_total += l_g_style - loss_dict['l_g_style'] = l_g_style - # gan loss - fake_g_pred = self.net_d(self.output) - l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) - l_g_total += l_g_gan - loss_dict['l_g_gan'] = l_g_gan - - l_g_total.backward() - self.optimizer_g.step() - - # optimize net_d - for p in self.net_d.parameters(): - p.requires_grad = True - - self.optimizer_d.zero_grad() - # real - real_d_pred = self.net_d(gan_gt) - l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) - loss_dict['l_d_real'] = l_d_real - loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) - l_d_real.backward() - # fake - fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9 - l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True) - loss_dict['l_d_fake'] = l_d_fake - loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) - l_d_fake.backward() - self.optimizer_d.step() - - if self.ema_decay > 0: - self.model_ema(decay=self.ema_decay) - - self.log_dict = self.reduce_loss_dict(loss_dict) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/datatypes/base.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/datatypes/base.py deleted file mode 100644 index 302cb2fb384f95939312420a69fd26b18a613d4c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/datatypes/base.py +++ /dev/null @@ -1,364 +0,0 @@ -import array -import logging - -from abc import ABC -from math import log -from typing import NamedTuple, Dict, Type, Any, Sequence, MutableSequence, Optional, Union, Collection - -from clickhouse_connect.driver.common import array_type, int_size, write_array, write_uint64, low_card_version -from clickhouse_connect.driver.context import BaseQueryContext -from clickhouse_connect.driver.ctypes import numpy_conv, data_conv -from clickhouse_connect.driver.exceptions import NotSupportedError -from clickhouse_connect.driver.insert import InsertContext -from clickhouse_connect.driver.query import QueryContext -from clickhouse_connect.driver.types import ByteSource -from clickhouse_connect.driver.options import np, pd - -logger = logging.getLogger(__name__) -ch_read_formats = {} -ch_write_formats = {} - - -class TypeDef(NamedTuple): - """ - Immutable tuple that contains all additional information needed to construct a particular ClickHouseType - """ - wrappers: tuple = () - keys: tuple = () - values: tuple = () - - @property - def arg_str(self): - return f"({', '.join(str(v) for v in self.values)})" if self.values else '' - - -class ClickHouseType(ABC): - """ - Base class for all ClickHouseType objects. 
- """ - __slots__ = 'nullable', 'low_card', 'wrappers', 'type_def', '__dict__' - _name_suffix = '' - encoding = 'utf8' - np_type = 'O' # Default to Numpy Object type - nano_divisor = 0 # Only relevant for date like objects - byte_size = 0 - valid_formats = 'native' - - python_type = None - base_type = None - - def __init_subclass__(cls, registered: bool = True): - if registered: - cls.base_type = cls.__name__ - type_map[cls.base_type] = cls - - @classmethod - def build(cls: Type['ClickHouseType'], type_def: TypeDef): - return cls(type_def) - - @classmethod - def _active_format(cls, fmt_map: Dict[Type['ClickHouseType'], str], ctx: BaseQueryContext): - ctx_fmt = ctx.active_fmt(cls.base_type) - if ctx_fmt: - return ctx_fmt - return fmt_map.get(cls, 'native') - - @classmethod - def read_format(cls, ctx: BaseQueryContext): - return cls._active_format(ch_read_formats, ctx) - - @classmethod - def write_format(cls, ctx: BaseQueryContext): - return cls._active_format(ch_write_formats, ctx) - - def __init__(self, type_def: TypeDef): - """ - Base class constructor that sets Nullable and LowCardinality wrappers - :param type_def: ClickHouseType base configuration parameters - """ - self.type_def = type_def - self.wrappers = type_def.wrappers - self.low_card = 'LowCardinality' in self.wrappers - self.nullable = 'Nullable' in self.wrappers - - def __eq__(self, other): - return other.__class__ == self.__class__ and self.type_def == other.type_def - - def __hash__(self): - return hash((self.type_def, self.__class__)) - - @property - def name(self): - name = f'{self.base_type}{self._name_suffix}' - for wrapper in reversed(self.wrappers): - name = f'{wrapper}({name})' - return name - - def data_size(self, sample: Collection) -> int: - if self.low_card: - values = set(sample) - d_size = self._data_size(values) + 2 - else: - d_size = self._data_size(sample) - if self.nullable: - d_size += 1 - return d_size - - def _data_size(self, _sample: Collection) -> int: - if self.byte_size: - return self.byte_size - return 0 - - def write_column_prefix(self, dest: bytearray): - """ - Prefix is primarily used is for the LowCardinality version (but see the JSON data type). Because of the - way the ClickHouse C++ code is written, this must be done before any data is written even if the - LowCardinality column is within a container. The only recognized low cardinality version is 1 - :param dest: The native protocol binary write buffer - """ - if self.low_card: - write_uint64(low_card_version, dest) - - def read_column_prefix(self, source: ByteSource): - """ - Read the low cardinality version. Like the write method, this has to happen immediately for container classes - :param source: The native protocol binary read buffer - :return: updated read pointer - """ - if self.low_card: - v = source.read_uint64() - if v != low_card_version: - logger.warning('Unexpected low cardinality version %d reading type %s', v, self.name) - - def read_column(self, source: ByteSource, num_rows: int, ctx: QueryContext) -> Sequence: - """ - Wrapping read method for all ClickHouseType data types. 
Only overridden for container classes so that - the LowCardinality version is read for the contained types - :param source: Native protocol binary read buffer - :param num_rows: Number of rows expected in the column - :param ctx: QueryContext for query specific settings - :return: The decoded column data as a sequence and the updated location pointer - """ - self.read_column_prefix(source) - return self.read_column_data(source, num_rows, ctx) - - def read_column_data(self, source: ByteSource, num_rows: int, ctx: QueryContext) -> Sequence: - """ - Public read method for all ClickHouseType data type columns - :param source: Native protocol binary read buffer - :param num_rows: Number of rows expected in the column - :param ctx: QueryContext for query specific settings - :return: The decoded column plus the updated location pointer - """ - if self.low_card: - column = self._read_low_card_column(source, num_rows, ctx) - elif self.nullable: - column = self._read_nullable_column(source, num_rows, ctx) - else: - column = self._read_column_binary(source, num_rows, ctx) - return self._finalize_column(column, ctx) - - def _read_nullable_column(self, source: ByteSource, num_rows: int, ctx: QueryContext) -> Sequence: - null_map = source.read_bytes(num_rows) - column = self._read_column_binary(source, num_rows, ctx) - null_obj = self._active_null(ctx) - return data_conv.build_nullable_column(column, null_map, null_obj) - - # The binary methods are really abstract, but they aren't implemented for container classes which - # delegate binary operations to their elements - - # pylint: disable=no-self-use - def _read_column_binary(self, - _source: ByteSource, - _num_rows: int, _ctx: QueryContext) -> Union[Sequence, MutableSequence]: - """ - Lowest level read method for ClickHouseType native data columns - :param _source: Native protocol binary read buffer - :param _num_rows: Expected number of rows in the column - :return: Decoded column plus updated read buffer - """ - return [], 0 - - def _finalize_column(self, column: Sequence, _ctx: QueryContext) -> Sequence: - return column - - def _write_column_binary(self, column: Union[Sequence, MutableSequence], dest: bytearray, ctx: InsertContext): - """ - Lowest level write method for ClickHouseType data columns - :param column: Python data column - :param dest: Native protocol write buffer - :param ctx: Insert Context with insert specific settings - """ - - def write_column(self, column: Sequence, dest: bytearray, ctx: InsertContext): - """ - Wrapping write method for ClickHouseTypes. Only overridden for container types that so that - the write_native_prefix is done at the right time for contained types - :param column: Column/sequence of Python values to write - :param dest: Native binary write buffer - :param ctx: Insert Context with insert specific settings - """ - self.write_column_prefix(dest) - self.write_column_data(column, dest, ctx) - - def write_column_data(self, column: Sequence, dest: bytearray, ctx: InsertContext): - """ - Public native write method for ClickHouseTypes. 
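[Editor's note] The Nullable framing used by the read path above (and mirrored on the write side below) is simply a byte-per-row null map followed by the values, with NULL slots still occupying a placeholder value. A tiny self-contained sketch:

column = [3, None, 7]

# One byte per row: 1 marks NULL, 0 marks a present value.
null_map = bytes(1 if x is None else 0 for x in column)   # b'\x00\x01\x00'
# NULL slots still take a value slot; numeric types write 0 as the placeholder.
values = [0 if x is None else x for x in column]          # [3, 0, 7]

decoded = [None if flag else v for flag, v in zip(null_map, values)]
assert decoded == [3, None, 7]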
Delegates the actual write to either the LowCardinality - write method or the _write_native_binary method of the type - :param column: Sequence of Python data - :param dest: Native binary write buffer - :param ctx: Insert Context with insert specific settings - """ - if self.low_card: - self._write_column_low_card(column, dest, ctx) - else: - if self.nullable: - dest += bytes([1 if x is None else 0 for x in column]) - self._write_column_binary(column, dest, ctx) - - # pylint: disable=no-member - def _read_low_card_column(self, source: ByteSource, num_rows: int, ctx: QueryContext): - if num_rows == 0: - return [] - key_data = source.read_uint64() - index_sz = 2 ** (key_data & 0xff) - key_cnt = source.read_uint64() - keys = self._read_column_binary(source, key_cnt, ctx) - index_cnt = source.read_uint64() - index = source.read_array(array_type(index_sz, False), index_cnt) - if self.nullable: - return self._build_lc_nullable_column(keys, index, ctx) - return self._build_lc_column(keys, index, ctx) - - def _build_lc_column(self, keys: Sequence, index: array.array, _ctx: QueryContext): - return [keys[ix] for ix in index] - - def _build_lc_nullable_column(self, keys: Sequence, index: array.array, ctx: QueryContext): - return data_conv.build_lc_nullable_column(keys, index, self._active_null(ctx)) - - def _write_column_low_card(self, column: Sequence, dest: bytearray, ctx: InsertContext): - if len(column) == 0: - return - index = [] - keys = [] - rev_map = {} - rmg = rev_map.get - if self.nullable: - keys.append(None) - key = 1 - for x in column: - if x is None: - index.append(0) - else: - ix = rmg(x) - if ix is None: - index.append(key) - keys.append(x) - rev_map[x] = key - key += 1 - else: - index.append(ix) - else: - key = 0 - for x in column: - ix = rmg(x) - if ix is None: - index.append(key) - keys.append(x) - rev_map[x] = key - key += 1 - else: - index.append(ix) - ix_type = int(log(len(keys), 2)) >> 3 # power of two bytes needed to store the total number of keys - write_uint64((1 << 9) | (1 << 10) | ix_type, dest) # Index type plus new dictionary (9) and additional keys(10) - write_uint64(len(keys), dest) - self._write_column_binary(keys, dest, ctx) - write_uint64(len(index), dest) - write_array(array_type(1 << ix_type, False), index, dest) - - def _active_null(self, _ctx: QueryContext) -> Any: - return None - - def _first_value(self, column: Sequence) -> Optional[Any]: - if self.nullable: - return next((x for x in column if x is not None), None) - if len(column): - return column[0] - return None - - -EMPTY_TYPE_DEF = TypeDef() -NULLABLE_TYPE_DEF = TypeDef(wrappers=('Nullable',)) -LC_TYPE_DEF = TypeDef(wrappers=('LowCardinality',)) -type_map: Dict[str, Type[ClickHouseType]] = {} - - -class ArrayType(ClickHouseType, ABC, registered=False): - """ - ClickHouse type that utilizes Python or Numpy arrays for fast reads and writes of binary data. 
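[Editor's note] To make the dictionary encoding in _write_column_low_card above concrete, here is a pure-Python sketch of the keys/index construction (serialization of the header and arrays is omitted; the helper name lc_encode is hypothetical):

from math import log

def lc_encode(column, nullable=False):
    # Every distinct value becomes a key; the column itself is stored as
    # integer indexes into that key list. Slot 0 is reserved for NULL.
    keys = [None] if nullable else []
    rev_map, index = {}, []
    for x in column:
        if nullable and x is None:
            index.append(0)
            continue
        ix = rev_map.get(x)
        if ix is None:
            rev_map[x] = ix = len(keys)
            keys.append(x)
        index.append(ix)
    # Power-of-two byte width of each index entry (1, 2, 4 or 8 bytes),
    # matching the ix_type computation above.
    width = 1 << (int(log(len(keys), 2)) >> 3)
    return keys, index, width

keys, index, width = lc_encode(['a', 'b', 'a', None, 'b'], nullable=True)
assert keys == [None, 'a', 'b'] and index == [1, 2, 1, 0, 2] and width == 1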
- arrays can only be used for ClickHouse types that can be translated into UInt64 (and smaller) integers - or Float32/64 - """ - _signed = True - _array_type = None - _struct_type = None - valid_formats = 'string', 'native' - python_type = int - - def __init_subclass__(cls, registered: bool = True): - super().__init_subclass__(registered) - if cls._array_type in ('i', 'I') and int_size == 2: - cls._array_type = 'L' if cls._array_type.isupper() else 'l' - if isinstance(cls._array_type, str) and cls._array_type: - cls._struct_type = '<' + cls._array_type - cls.byte_size = array.array(cls._array_type).itemsize - - def _read_column_binary(self, source: ByteSource, num_rows: int, ctx: QueryContext): - if ctx.use_numpy: - return numpy_conv.read_numpy_array(source, self.np_type, num_rows) - return source.read_array(self._array_type, num_rows) - - def _read_nullable_column(self, source: ByteSource, num_rows: int, ctx: QueryContext) -> Sequence: - return data_conv.read_nullable_array(source, self._array_type, num_rows, self._active_null(ctx)) - - def _build_lc_column(self, keys: Sequence, index: array.array, ctx: QueryContext): - if ctx.use_numpy: - return np.fromiter((keys[ix] for ix in index), dtype=keys.dtype, count=len(index)) - return super()._build_lc_column(keys, index, ctx) - - def _finalize_column(self, column: Sequence, ctx: QueryContext) -> Sequence: - if self.read_format(ctx) == 'string': - return [str(x) for x in column] - if ctx.use_extended_dtypes and self.nullable: - return pd.array(column, dtype=self.base_type) - if ctx.use_numpy and self.nullable and (not ctx.use_none): - return np.array(column, dtype=self.np_type) - return column - - def _write_column_binary(self, column: Union[Sequence, MutableSequence], dest: bytearray, ctx: InsertContext): - if len(column) and self.nullable: - column = [0 if x is None else x for x in column] - write_array(self._array_type, column, dest) - - def _active_null(self, ctx: QueryContext): - if ctx.as_pandas and ctx.use_extended_dtypes: - return pd.NA - if ctx.use_none: - return None - return 0 - - -class UnsupportedType(ClickHouseType, ABC, registered=False): - """ - Base class for ClickHouse types that can't be serialized/deserialized into Python types. - Mostly useful just for DDL statements - """ - def __init__(self, type_def: TypeDef): - super().__init__(type_def) - self._name_suffix = type_def.arg_str - - def _read_column_binary(self, source: Sequence, num_rows: int, ctx: QueryContext): - raise NotSupportedError(f'{self.name} deserialization not supported') - - def _write_column_binary(self, column: Union[Sequence, MutableSequence], dest: bytearray, ctx: InsertContext): - raise NotSupportedError(f'{self.name} serialization not supported') diff --git a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/util/visualizer.py b/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/util/visualizer.py deleted file mode 100644 index 810a0513ab997103ace77b665c9a17f223b173c9..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/util/visualizer.py +++ /dev/null @@ -1,166 +0,0 @@ -import numpy as np -import os -import sys -import ntpath -import time -from . import util, html -from subprocess import Popen, PIPE -import torch - - -if sys.version_info[0] == 2: - VisdomExceptionBase = Exception -else: - VisdomExceptionBase = ConnectionError - - -def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): - """Save images to the disk. 
- - Parameters: - webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) - visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs - image_path (str) -- the string is used to create image paths - aspect_ratio (float) -- the aspect ratio of saved images - width (int) -- the images will be resized to width x width - - This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. - """ - image_dir = webpage.get_image_dir() - short_path = ntpath.basename(image_path[0]) - name = os.path.splitext(short_path)[0] - - webpage.add_header(name) - ims, txts, links = [], [], [] - - for label, im_data in visuals.items(): - im = util.tensor2im(im_data) - image_name = '%s_%s.png' % (name, label) - save_path = os.path.join(image_dir, image_name) - util.save_image(im, save_path, aspect_ratio=aspect_ratio) - ims.append(image_name) - txts.append(label) - links.append(image_name) - webpage.add_images(ims, txts, links, width=width) - - -class Visualizer(): - """This class includes several functions that can display/save images and print/save logging information. - - It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. - """ - - def __init__(self, opt): - """Initialize the Visualizer class - - Parameters: - opt -- stores all the experiment flags; needs to be a subclass of BaseOptions - Step 1: Cache the training/test options - Step 2: connect to a visdom server - Step 3: create an HTML object for saveing HTML filters - Step 4: create a logging file to store training losses - """ - self.opt = opt # cache the option - self.display_id = opt.display_id - self.use_html = opt.isTrain and not opt.no_html - self.win_size = opt.display_winsize - self.name = opt.name - self.port = opt.display_port - self.saved = False - - if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ - self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') - self.img_dir = os.path.join(self.web_dir, 'images') - print('create web directory %s...' % self.web_dir) - util.mkdirs([self.web_dir, self.img_dir]) - # create a logging file to store training losses - self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') - with open(self.log_name, "a") as log_file: - now = time.strftime("%c") - log_file.write('================ Training Loss (%s) ================\n' % now) - - def reset(self): - """Reset the self.saved status""" - self.saved = False - - def create_visdom_connections(self): - """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ - cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port - print('\n\nCould not connect to Visdom server. \n Trying to start a server....') - print('Command: %s' % cmd) - Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) - - def display_current_results(self, visuals, epoch, save_result): - """Display current results on visdom; save current results to an HTML file. - - Parameters: - visuals (OrderedDict) - - dictionary of images to display or save - epoch (int) - - the current epoch - save_result (bool) - - if save the current results to an HTML file - """ - if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. 
- self.saved = True - # save images to the disk - for label, image in visuals.items(): - image_numpy = util.tensor2im(image) - img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) - util.save_image(image_numpy, img_path) - - # update website - webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1) - for n in range(epoch, 0, -1): - webpage.add_header('epoch [%d]' % n) - ims, txts, links = [], [], [] - - for label, image_numpy in visuals.items(): - # image_numpy = util.tensor2im(image) - img_path = 'epoch%.3d_%s.png' % (n, label) - ims.append(img_path) - txts.append(label) - links.append(img_path) - webpage.add_images(ims, txts, links, width=self.win_size) - webpage.save() - - # def plot_current_losses(self, epoch, counter_ratio, losses): - # """display the current losses on visdom display: dictionary of error labels and values - # - # Parameters: - # epoch (int) -- current epoch - # counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 - # losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - # """ - # if not hasattr(self, 'plot_data'): - # self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} - # self.plot_data['X'].append(epoch + counter_ratio) - # self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) - # try: - # self.vis.line( - # X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), - # Y=np.array(self.plot_data['Y']), - # opts={ - # 'title': self.name + ' loss over time', - # 'legend': self.plot_data['legend'], - # 'xlabel': 'epoch', - # 'ylabel': 'loss'}, - # win=self.display_id) - # except VisdomExceptionBase: - # self.create_visdom_connections() - - # losses: same format as |losses| of plot_current_losses - def print_current_losses(self, epoch, iters, losses, t_comp, t_data): - """print current losses on console; also save the losses to the disk - - Parameters: - epoch (int) -- current epoch - iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) - losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - t_comp (float) -- computational time per data point (normalized by batch_size) - t_data (float) -- data loading time per data point (normalized by batch_size) - """ - message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) - for k, v in losses.items(): - message += '%s: %.3f ' % (k, v) - - print(message) # print the message - with open(self.log_name, "a") as log_file: - log_file.write('%s\n' % message) # save the message diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/logger.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/logger.py deleted file mode 100644 index d77d42cbe86366e5d91e93311f92bb166c304184..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/logger.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
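[Editor's note] Before the detectron2 logger module below, one note on print_current_losses above: the log line it writes has a fixed, greppable layout. A quick illustration with hypothetical loss values:

from collections import OrderedDict

losses = OrderedDict([('G_GAN', 0.734), ('G_L1', 12.502), ('D_real', 0.311)])
epoch, iters, t_comp, t_data = 4, 1200, 0.082, 0.005

message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
    message += '%s: %.3f ' % (k, v)
print(message)
# (epoch: 4, iters: 1200, time: 0.082, data: 0.005) G_GAN: 0.734 G_L1: 12.502 D_real: 0.311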
-import atexit -import functools -import logging -import os -import sys -import time -from collections import Counter -import torch -from tabulate import tabulate -from termcolor import colored - -from annotator.oneformer.detectron2.utils.file_io import PathManager - -__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] - - -class _ColorfulFormatter(logging.Formatter): - def __init__(self, *args, **kwargs): - self._root_name = kwargs.pop("root_name") + "." - self._abbrev_name = kwargs.pop("abbrev_name", "") - if len(self._abbrev_name): - self._abbrev_name = self._abbrev_name + "." - super(_ColorfulFormatter, self).__init__(*args, **kwargs) - - def formatMessage(self, record): - record.name = record.name.replace(self._root_name, self._abbrev_name) - log = super(_ColorfulFormatter, self).formatMessage(record) - if record.levelno == logging.WARNING: - prefix = colored("WARNING", "red", attrs=["blink"]) - elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: - prefix = colored("ERROR", "red", attrs=["blink", "underline"]) - else: - return log - return prefix + " " + log - - -@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers -def setup_logger( - output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None -): - """ - Initialize the detectron2 logger and set its verbosity level to "DEBUG". - - Args: - output (str): a file name or a directory to save log. If None, will not save log file. - If ends with ".txt" or ".log", assumed to be a file name. - Otherwise, logs will be saved to `output/log.txt`. - name (str): the root module name of this logger - abbrev_name (str): an abbreviation of the module, to avoid long names in logs. - Set to "" to not log the root module in logs. - By default, will abbreviate "detectron2" to "d2" and leave other - modules unchanged. - - Returns: - logging.Logger: a logger - """ - logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - logger.propagate = False - - if abbrev_name is None: - abbrev_name = "d2" if name == "detectron2" else name - - plain_formatter = logging.Formatter( - "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" - ) - # stdout logging: master only - if distributed_rank == 0: - ch = logging.StreamHandler(stream=sys.stdout) - ch.setLevel(logging.DEBUG) - if color: - formatter = _ColorfulFormatter( - colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", - datefmt="%m/%d %H:%M:%S", - root_name=name, - abbrev_name=str(abbrev_name), - ) - else: - formatter = plain_formatter - ch.setFormatter(formatter) - logger.addHandler(ch) - - # file logging: all workers - if output is not None: - if output.endswith(".txt") or output.endswith(".log"): - filename = output - else: - filename = os.path.join(output, "log.txt") - if distributed_rank > 0: - filename = filename + ".rank{}".format(distributed_rank) - PathManager.mkdirs(os.path.dirname(filename)) - - fh = logging.StreamHandler(_cached_log_stream(filename)) - fh.setLevel(logging.DEBUG) - fh.setFormatter(plain_formatter) - logger.addHandler(fh) - - return logger - - -# cache the opened file object, so that different calls to `setup_logger` -# with the same file name can safely write to the same file. 
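[Editor's note] Before the cached-stream helper that the comment above introduces, a compact restatement of how setup_logger maps its output argument to a log file (the helper name log_file_for and the POSIX paths are hypothetical):

import os

def log_file_for(output, distributed_rank=0):
    # Explicit .txt/.log names are used as-is; anything else is a directory.
    if output.endswith('.txt') or output.endswith('.log'):
        filename = output
    else:
        filename = os.path.join(output, 'log.txt')
    if distributed_rank > 0:
        filename = filename + '.rank{}'.format(distributed_rank)
    return filename

assert log_file_for('/tmp/exp1') == '/tmp/exp1/log.txt'
assert log_file_for('/tmp/exp1/train.log') == '/tmp/exp1/train.log'
assert log_file_for('/tmp/exp1', distributed_rank=2) == '/tmp/exp1/log.txt.rank2'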
-@functools.lru_cache(maxsize=None) -def _cached_log_stream(filename): - # use 1K buffer if writing to cloud storage - io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1) - atexit.register(io.close) - return io - - -""" -Below are some other convenient logging methods. -They are mainly adopted from -https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py -""" - - -def _find_caller(): - """ - Returns: - str: module name of the caller - tuple: a hashable key to be used to identify different callers - """ - frame = sys._getframe(2) - while frame: - code = frame.f_code - if os.path.join("utils", "logger.") not in code.co_filename: - mod_name = frame.f_globals["__name__"] - if mod_name == "__main__": - mod_name = "detectron2" - return mod_name, (code.co_filename, frame.f_lineno, code.co_name) - frame = frame.f_back - - -_LOG_COUNTER = Counter() -_LOG_TIMER = {} - - -def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): - """ - Log only for the first n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - key (str or tuple[str]): the string(s) can be one of "caller" or - "message", which defines how to identify duplicated logs. - For example, if called with `n=1, key="caller"`, this function - will only log the first call from the same caller, regardless of - the message content. - If called with `n=1, key="message"`, this function will log the - same content only once, even if they are called from different places. - If called with `n=1, key=("caller", "message")`, this function - will not log only if the same caller has logged the same message before. - """ - if isinstance(key, str): - key = (key,) - assert len(key) > 0 - - caller_module, caller_key = _find_caller() - hash_key = () - if "caller" in key: - hash_key = hash_key + caller_key - if "message" in key: - hash_key = hash_key + (msg,) - - _LOG_COUNTER[hash_key] += 1 - if _LOG_COUNTER[hash_key] <= n: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n(lvl, msg, n=1, *, name=None): - """ - Log once per n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - _LOG_COUNTER[key] += 1 - if n == 1 or _LOG_COUNTER[key] % n == 1: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n_seconds(lvl, msg, n=1, *, name=None): - """ - Log no more than once per n seconds. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - last_logged = _LOG_TIMER.get(key, None) - current_time = time.time() - if last_logged is None or current_time - last_logged >= n: - logging.getLogger(name or caller_module).log(lvl, msg) - _LOG_TIMER[key] = current_time - - -def create_small_table(small_dict): - """ - Create a small table using the keys of small_dict as headers. This is only - suitable for small dictionaries. - - Args: - small_dict (dict): a result dictionary of only a few items. - - Returns: - str: the table as a string. 
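[Editor's note] A hedged usage sketch for the three rate-limited helpers defined above, assuming they are importable from this module (the logger name 'demo' is hypothetical):

import logging

logger = setup_logger(name='demo')  # stdout handler at DEBUG level

for i in range(100):
    # Emitted once: with the default key='caller', duplicates are
    # identified by the calling site, not by message content.
    log_first_n(logging.WARNING, 'dataset has no annotations', n=1, name='demo')
    # Emitted on the 1st, 11th, 21st, ... call from this line.
    log_every_n(logging.INFO, 'step %d' % i, n=10, name='demo')
    # Emitted at most once every 5 seconds from this line.
    log_every_n_seconds(logging.INFO, 'heartbeat', n=5, name='demo')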
- """ - keys, values = tuple(zip(*small_dict.items())) - table = tabulate( - [values], - headers=keys, - tablefmt="pipe", - floatfmt=".3f", - stralign="center", - numalign="center", - ) - return table - - -def _log_api_usage(identifier: str): - """ - Internal function used to log the usage of different detectron2 components - inside facebook's infra. - """ - torch._C._log_api_usage_once("detectron2." + identifier) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/upernet_uniformer.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/upernet_uniformer.py deleted file mode 100644 index 41aa4db809dc6e2c508e98051f61807d07477903..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/upernet_uniformer.py +++ /dev/null @@ -1,43 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained=None, - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - mlp_ratio=4., - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1), - decode_head=dict( - type='UPerHead', - in_channels=[64, 128, 320, 512], - in_index=[0, 1, 2, 3], - pool_scales=(1, 2, 3, 6), - channels=512, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=320, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py deleted file mode 100644 index 6d12508469c6c8fa1884debece44c58d158cb6fa..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py +++ /dev/null @@ -1,268 +0,0 @@ -# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501 - -# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. -# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator -# Augmentation (ADA) -# ======================================================================= - -# 1. Definitions - -# "Licensor" means any person or entity that distributes its Work. - -# "Software" means the original work of authorship made available under -# this License. - -# "Work" means the Software and any additions to or derivative works of -# the Software that are made available under this License. - -# The terms "reproduce," "reproduction," "derivative works," and -# "distribution" have the meaning as provided under U.S. copyright law; -# provided, however, that for the purposes of this License, derivative -# works shall not include works that remain separable from, or merely -# link (or bind by name) to the interfaces of, the Work. 
- -# Works, including the Software, are "made available" under this License -# by including in or with the Work either (a) a copyright notice -# referencing the applicability of this License to the Work, or (b) a -# copy of this License. - -# 2. License Grants - -# 2.1 Copyright Grant. Subject to the terms and conditions of this -# License, each Licensor grants to you a perpetual, worldwide, -# non-exclusive, royalty-free, copyright license to reproduce, -# prepare derivative works of, publicly display, publicly perform, -# sublicense and distribute its Work and any resulting derivative -# works in any form. - -# 3. Limitations - -# 3.1 Redistribution. You may reproduce or distribute the Work only -# if (a) you do so under this License, (b) you include a complete -# copy of this License with your distribution, and (c) you retain -# without modification any copyright, patent, trademark, or -# attribution notices that are present in the Work. - -# 3.2 Derivative Works. You may specify that additional or different -# terms apply to the use, reproduction, and distribution of your -# derivative works of the Work ("Your Terms") only if (a) Your Terms -# provide that the use limitation in Section 3.3 applies to your -# derivative works, and (b) you identify the specific derivative -# works that are subject to Your Terms. Notwithstanding Your Terms, -# this License (including the redistribution requirements in Section -# 3.1) will continue to apply to the Work itself. - -# 3.3 Use Limitation. The Work and any derivative works thereof only -# may be used or intended for use non-commercially. Notwithstanding -# the foregoing, NVIDIA and its affiliates may use the Work and any -# derivative works commercially. As used herein, "non-commercially" -# means for research or evaluation purposes only. - -# 3.4 Patent Claims. If you bring or threaten to bring a patent claim -# against any Licensor (including any claim, cross-claim or -# counterclaim in a lawsuit) to enforce any patents that you allege -# are infringed by any Work, then your rights under this License from -# such Licensor (including the grant in Section 2.1) will terminate -# immediately. - -# 3.5 Trademarks. This License does not grant any rights to use any -# Licensor’s or its affiliates’ names, logos, or trademarks, except -# as necessary to reproduce the notices described in this License. - -# 3.6 Termination. If you violate any term of this License, then your -# rights under this License (including the grant in Section 2.1) will -# terminate immediately. - -# 4. Disclaimer of Warranty. - -# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR -# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER -# THIS LICENSE. - -# 5. Limitation of Liability. - -# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL -# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE -# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, -# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF -# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK -# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, -# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER -# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGES. 
- -# ======================================================================= - -import torch -import torch.nn.functional as F -from torch import nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['fused_bias_leakyrelu']) - - -class FusedBiasLeakyReLUFunctionBackward(Function): - """Calculate second order deviation. - - This function is to compute the second order deviation for the fused leaky - relu operation. - """ - - @staticmethod - def forward(ctx, grad_output, out, negative_slope, scale): - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = scale - - empty = grad_output.new_empty(0) - - grad_input = ext_module.fused_bias_leakyrelu( - grad_output, - empty, - out, - act=3, - grad=1, - alpha=negative_slope, - scale=scale) - - dim = [0] - - if grad_input.ndim > 2: - dim += list(range(2, grad_input.ndim)) - - grad_bias = grad_input.sum(dim).detach() - - return grad_input, grad_bias - - @staticmethod - def backward(ctx, gradgrad_input, gradgrad_bias): - out, = ctx.saved_tensors - - # The second order deviation, in fact, contains two parts, while the - # the first part is zero. Thus, we direct consider the second part - # which is similar with the first order deviation in implementation. - gradgrad_out = ext_module.fused_bias_leakyrelu( - gradgrad_input, - gradgrad_bias.to(out.dtype), - out, - act=3, - grad=1, - alpha=ctx.negative_slope, - scale=ctx.scale) - - return gradgrad_out, None, None, None - - -class FusedBiasLeakyReLUFunction(Function): - - @staticmethod - def forward(ctx, input, bias, negative_slope, scale): - empty = input.new_empty(0) - - out = ext_module.fused_bias_leakyrelu( - input, - bias, - empty, - act=3, - grad=0, - alpha=negative_slope, - scale=scale) - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = scale - - return out - - @staticmethod - def backward(ctx, grad_output): - out, = ctx.saved_tensors - - grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply( - grad_output, out, ctx.negative_slope, ctx.scale) - - return grad_input, grad_bias, None, None - - -class FusedBiasLeakyReLU(nn.Module): - """Fused bias leaky ReLU. - - This function is introduced in the StyleGAN2: - http://arxiv.org/abs/1912.04958 - - The bias term comes from the convolution operation. In addition, to keep - the variance of the feature map or gradients unchanged, they also adopt a - scale similarly with Kaiming initialization. However, since the - :math:`1+{alpha}^2` : is too small, we can just ignore it. Therefore, the - final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501 - your own scale. - - TODO: Implement the CPU version. - - Args: - channel (int): The channel number of the feature map. - negative_slope (float, optional): Same as nn.LeakyRelu. - Defaults to 0.2. - scale (float, optional): A scalar to adjust the variance of the feature - map. Defaults to 2**0.5. - """ - - def __init__(self, num_channels, negative_slope=0.2, scale=2**0.5): - super(FusedBiasLeakyReLU, self).__init__() - - self.bias = nn.Parameter(torch.zeros(num_channels)) - self.negative_slope = negative_slope - self.scale = scale - - def forward(self, input): - return fused_bias_leakyrelu(input, self.bias, self.negative_slope, - self.scale) - - -def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5): - """Fused bias leaky ReLU function. 
- - This function is introduced in the StyleGAN2: - http://arxiv.org/abs/1912.04958 - - The bias term comes from the convolution operation. In addition, to keep - the variance of the feature map or gradients unchanged, they also adopt a - scale similarly with Kaiming initialization. However, since the - :math:`1+{alpha}^2` : is too small, we can just ignore it. Therefore, the - final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501 - your own scale. - - Args: - input (torch.Tensor): Input feature map. - bias (nn.Parameter): The bias from convolution operation. - negative_slope (float, optional): Same as nn.LeakyRelu. - Defaults to 0.2. - scale (float, optional): A scalar to adjust the variance of the feature - map. Defaults to 2**0.5. - - Returns: - torch.Tensor: Feature map after non-linear activation. - """ - - if not input.is_cuda: - return bias_leakyrelu_ref(input, bias, negative_slope, scale) - - return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype), - negative_slope, scale) - - -def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2**0.5): - - if bias is not None: - assert bias.ndim == 1 - assert bias.shape[0] == x.shape[1] - x = x + bias.reshape([-1 if i == 1 else 1 for i in range(x.ndim)]) - - x = F.leaky_relu(x, negative_slope) - if scale != 1: - x = x * scale - - return x diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/roipoint_pool3d.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/roipoint_pool3d.py deleted file mode 100644 index 0a21412c0728431c04b84245bc2e3109eea9aefc..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/roipoint_pool3d.py +++ /dev/null @@ -1,77 +0,0 @@ -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward']) - - -class RoIPointPool3d(nn.Module): - """Encode the geometry-specific features of each 3D proposal. - - Please refer to `Paper of PartA2 `_ - for more details. - - Args: - num_sampled_points (int, optional): Number of samples in each roi. - Default: 512. - """ - - def __init__(self, num_sampled_points=512): - super().__init__() - self.num_sampled_points = num_sampled_points - - def forward(self, points, point_features, boxes3d): - """ - Args: - points (torch.Tensor): Input points whose shape is (B, N, C). - point_features (torch.Tensor): Features of input points whose shape - is (B, N, C). - boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). - - Returns: - pooled_features (torch.Tensor): The output pooled features whose - shape is (B, M, 512, 3 + C). - pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). - """ - return RoIPointPool3dFunction.apply(points, point_features, boxes3d, - self.num_sampled_points) - - -class RoIPointPool3dFunction(Function): - - @staticmethod - def forward(ctx, points, point_features, boxes3d, num_sampled_points=512): - """ - Args: - points (torch.Tensor): Input points whose shape is (B, N, C). - point_features (torch.Tensor): Features of input points whose shape - is (B, N, C). - boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). - num_sampled_points (int, optional): The num of sampled points. - Default: 512. - - Returns: - pooled_features (torch.Tensor): The output pooled features whose - shape is (B, M, 512, 3 + C). - pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). 
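[Editor's note] Returning briefly to the fused activation above: the CPU fallback bias_leakyrelu_ref is plain broadcasting plus leaky-ReLU plus scaling, which is easy to verify numerically (the tensor shapes below are arbitrary):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8, 8)   # (N, C, H, W)
bias = torch.randn(4)         # one bias per channel

out = bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2 ** 0.5)
# bias is broadcast over the channel dimension, then activated, then scaled.
expected = F.leaky_relu(x + bias.view(1, -1, 1, 1), 0.2) * 2 ** 0.5
assert torch.allclose(out, expected)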
- """ - assert len(points.shape) == 3 and points.shape[2] == 3 - batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[ - 1], point_features.shape[2] - pooled_boxes3d = boxes3d.view(batch_size, -1, 7) - pooled_features = point_features.new_zeros( - (batch_size, boxes_num, num_sampled_points, 3 + feature_len)) - pooled_empty_flag = point_features.new_zeros( - (batch_size, boxes_num)).int() - - ext_module.roipoint_pool3d_forward(points.contiguous(), - pooled_boxes3d.contiguous(), - point_features.contiguous(), - pooled_features, pooled_empty_flag) - - return pooled_features, pooled_empty_flag - - @staticmethod - def backward(ctx, grad_out): - raise NotImplementedError diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/evaluation.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/evaluation.py deleted file mode 100644 index 4d00999ce5665c53bded8de9e084943eee2d230d..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/evaluation.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import warnings -from math import inf - -import torch.distributed as dist -from torch.nn.modules.batchnorm import _BatchNorm -from torch.utils.data import DataLoader - -from annotator.uniformer.mmcv.fileio import FileClient -from annotator.uniformer.mmcv.utils import is_seq_of -from .hook import Hook -from .logger import LoggerHook - - -class EvalHook(Hook): - """Non-Distributed evaluation hook. - - This hook will regularly perform evaluation in a given interval when - performing in non-distributed environment. - - Args: - dataloader (DataLoader): A PyTorch dataloader, whose dataset has - implemented ``evaluate`` function. - start (int | None, optional): Evaluation starting epoch. It enables - evaluation before the training starts if ``start`` <= the resuming - epoch. If None, whether to evaluate is merely decided by - ``interval``. Default: None. - interval (int): Evaluation interval. Default: 1. - by_epoch (bool): Determine perform evaluation by epoch or by iteration. - If set to True, it will perform by epoch. Otherwise, by iteration. - Default: True. - save_best (str, optional): If a metric is specified, it would measure - the best checkpoint during evaluation. The information about best - checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep - best score value and best checkpoint path, which will be also - loaded when resume checkpoint. Options are the evaluation metrics - on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox - detection and instance segmentation. ``AR@100`` for proposal - recall. If ``save_best`` is ``auto``, the first key of the returned - ``OrderedDict`` result will be used. Default: None. - rule (str | None, optional): Comparison rule for best score. If set to - None, it will infer a reasonable rule. Keys such as 'acc', 'top' - .etc will be inferred by 'greater' rule. Keys contain 'loss' will - be inferred by 'less' rule. Options are 'greater', 'less', None. - Default: None. - test_fn (callable, optional): test a model with samples from a - dataloader, and return the test results. If ``None``, the default - test function ``mmcv.engine.single_gpu_test`` will be used. - (default: ``None``) - greater_keys (List[str] | None, optional): Metric keys that will be - inferred by 'greater' comparison rule. If ``None``, - _default_greater_keys will be used. 
(default: ``None``) - less_keys (List[str] | None, optional): Metric keys that will be - inferred by 'less' comparison rule. If ``None``, _default_less_keys - will be used. (default: ``None``) - out_dir (str, optional): The root directory to save checkpoints. If not - specified, `runner.work_dir` will be used by default. If specified, - the `out_dir` will be the concatenation of `out_dir` and the last - level directory of `runner.work_dir`. - `New in version 1.3.16.` - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. Default: None. - `New in version 1.3.16.` - **eval_kwargs: Evaluation arguments fed into the evaluate function of - the dataset. - - Notes: - If new arguments are added for EvalHook, tools/test.py, - tools/eval_metric.py may be affected. - """ - - # Since the key for determine greater or less is related to the downstream - # tasks, downstream repos may need to overwrite the following inner - # variable accordingly. - - rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y} - init_value_map = {'greater': -inf, 'less': inf} - _default_greater_keys = [ - 'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU', - 'mAcc', 'aAcc' - ] - _default_less_keys = ['loss'] - - def __init__(self, - dataloader, - start=None, - interval=1, - by_epoch=True, - save_best=None, - rule=None, - test_fn=None, - greater_keys=None, - less_keys=None, - out_dir=None, - file_client_args=None, - **eval_kwargs): - if not isinstance(dataloader, DataLoader): - raise TypeError(f'dataloader must be a pytorch DataLoader, ' - f'but got {type(dataloader)}') - - if interval <= 0: - raise ValueError(f'interval must be a positive number, ' - f'but got {interval}') - - assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean' - - if start is not None and start < 0: - raise ValueError(f'The evaluation start epoch {start} is smaller ' - f'than 0') - - self.dataloader = dataloader - self.interval = interval - self.start = start - self.by_epoch = by_epoch - - assert isinstance(save_best, str) or save_best is None, \ - '""save_best"" should be a str or None ' \ - f'rather than {type(save_best)}' - self.save_best = save_best - self.eval_kwargs = eval_kwargs - self.initial_flag = True - - if test_fn is None: - from annotator.uniformer.mmcv.engine import single_gpu_test - self.test_fn = single_gpu_test - else: - self.test_fn = test_fn - - if greater_keys is None: - self.greater_keys = self._default_greater_keys - else: - if not isinstance(greater_keys, (list, tuple)): - greater_keys = (greater_keys, ) - assert is_seq_of(greater_keys, str) - self.greater_keys = greater_keys - - if less_keys is None: - self.less_keys = self._default_less_keys - else: - if not isinstance(less_keys, (list, tuple)): - less_keys = (less_keys, ) - assert is_seq_of(less_keys, str) - self.less_keys = less_keys - - if self.save_best is not None: - self.best_ckpt_path = None - self._init_rule(rule, self.save_best) - - self.out_dir = out_dir - self.file_client_args = file_client_args - - def _init_rule(self, rule, key_indicator): - """Initialize rule, key_indicator, comparison_func, and best score. - - Here is the rule to determine which rule is used for key indicator - when the rule is not specific (note that the key indicator matching - is case-insensitive): - 1. If the key indicator is in ``self.greater_keys``, the rule will be - specified as 'greater'. - 2. Or if the key indicator is in ``self.less_keys``, the rule will be - specified as 'less'. - 3. 
Or if the key indicator is equal to the substring in any one item - in ``self.greater_keys``, the rule will be specified as 'greater'. - 4. Or if the key indicator is equal to the substring in any one item - in ``self.less_keys``, the rule will be specified as 'less'. - - Args: - rule (str | None): Comparison rule for best score. - key_indicator (str | None): Key indicator to determine the - comparison rule. - """ - if rule not in self.rule_map and rule is not None: - raise KeyError(f'rule must be greater, less or None, ' - f'but got {rule}.') - - if rule is None: - if key_indicator != 'auto': - # `_lc` here means we use the lower case of keys for - # case-insensitive matching - key_indicator_lc = key_indicator.lower() - greater_keys = [key.lower() for key in self.greater_keys] - less_keys = [key.lower() for key in self.less_keys] - - if key_indicator_lc in greater_keys: - rule = 'greater' - elif key_indicator_lc in less_keys: - rule = 'less' - elif any(key in key_indicator_lc for key in greater_keys): - rule = 'greater' - elif any(key in key_indicator_lc for key in less_keys): - rule = 'less' - else: - raise ValueError(f'Cannot infer the rule for key ' - f'{key_indicator}, thus a specific rule ' - f'must be specified.') - self.rule = rule - self.key_indicator = key_indicator - if self.rule is not None: - self.compare_func = self.rule_map[self.rule] - - def before_run(self, runner): - if not self.out_dir: - self.out_dir = runner.work_dir - - self.file_client = FileClient.infer_client(self.file_client_args, - self.out_dir) - - # if `self.out_dir` is not equal to `runner.work_dir`, it means that - # `self.out_dir` is set so the final `self.out_dir` is the - # concatenation of `self.out_dir` and the last level directory of - # `runner.work_dir` - if self.out_dir != runner.work_dir: - basename = osp.basename(runner.work_dir.rstrip(osp.sep)) - self.out_dir = self.file_client.join_path(self.out_dir, basename) - runner.logger.info( - (f'The best checkpoint will be saved to {self.out_dir} by ' - f'{self.file_client.name}')) - - if self.save_best is not None: - if runner.meta is None: - warnings.warn('runner.meta is None. Creating an empty one.') - runner.meta = dict() - runner.meta.setdefault('hook_msgs', dict()) - self.best_ckpt_path = runner.meta['hook_msgs'].get( - 'best_ckpt', None) - - def before_train_iter(self, runner): - """Evaluate the model only at the start of training by iteration.""" - if self.by_epoch or not self.initial_flag: - return - if self.start is not None and runner.iter >= self.start: - self.after_train_iter(runner) - self.initial_flag = False - - def before_train_epoch(self, runner): - """Evaluate the model only at the start of training by epoch.""" - if not (self.by_epoch and self.initial_flag): - return - if self.start is not None and runner.epoch >= self.start: - self.after_train_epoch(runner) - self.initial_flag = False - - def after_train_iter(self, runner): - """Called after every training iter to evaluate the results.""" - if not self.by_epoch and self._should_evaluate(runner): - # Because the priority of EvalHook is higher than LoggerHook, the - # training log and the evaluating log are mixed. Therefore, - # we need to dump the training log and clear it before evaluating - # log is generated. 
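[Editor's note] Stepping back to _init_rule above: a compact restatement of its inference order (lower-cased exact match first, then substring containment; the four separate checks are collapsed here, which behaves identically for the default key lists):

greater_keys = ['acc', 'top', 'ar@', 'auc', 'precision', 'map', 'mdice',
                'miou', 'macc', 'aacc']  # _default_greater_keys, lower-cased
less_keys = ['loss']

def infer_rule(key_indicator):
    k = key_indicator.lower()
    if k in greater_keys or any(g in k for g in greater_keys):
        return 'greater'
    if k in less_keys or any(l in k for l in less_keys):
        return 'less'
    raise ValueError('Cannot infer the rule for key %s' % key_indicator)

assert infer_rule('bbox_mAP') == 'greater'   # 'map' matches as a substring
assert infer_rule('val_loss') == 'less'
assert infer_rule('Top-1 Acc') == 'greater'  # matching is case-insensitive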
In addition, this problem will only appear in - # `IterBasedRunner` whose `self.by_epoch` is False, because - # `EpochBasedRunner` whose `self.by_epoch` is True calls - # `_do_evaluate` in `after_train_epoch` stage, and at this stage - # the training log has been printed, so it will not cause any - # problem. more details at - # https://github.com/open-mmlab/mmsegmentation/issues/694 - for hook in runner._hooks: - if isinstance(hook, LoggerHook): - hook.after_train_iter(runner) - runner.log_buffer.clear() - - self._do_evaluate(runner) - - def after_train_epoch(self, runner): - """Called after every training epoch to evaluate the results.""" - if self.by_epoch and self._should_evaluate(runner): - self._do_evaluate(runner) - - def _do_evaluate(self, runner): - """perform evaluation and save ckpt.""" - results = self.test_fn(runner.model, self.dataloader) - runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) - key_score = self.evaluate(runner, results) - # the key_score may be `None` so it needs to skip the action to save - # the best checkpoint - if self.save_best and key_score: - self._save_ckpt(runner, key_score) - - def _should_evaluate(self, runner): - """Judge whether to perform evaluation. - - Here is the rule to judge whether to perform evaluation: - 1. It will not perform evaluation during the epoch/iteration interval, - which is determined by ``self.interval``. - 2. It will not perform evaluation if the start time is larger than - current time. - 3. It will not perform evaluation when current time is larger than - the start time but during epoch/iteration interval. - - Returns: - bool: The flag indicating whether to perform evaluation. - """ - if self.by_epoch: - current = runner.epoch - check_time = self.every_n_epochs - else: - current = runner.iter - check_time = self.every_n_iters - - if self.start is None: - if not check_time(runner, self.interval): - # No evaluation during the interval. - return False - elif (current + 1) < self.start: - # No evaluation if start is larger than the current time. - return False - else: - # Evaluation only at epochs/iters 3, 5, 7... - # if start==3 and interval==2 - if (current + 1 - self.start) % self.interval: - return False - return True - - def _save_ckpt(self, runner, key_score): - """Save the best checkpoint. - - It will compare the score according to the compare function, write - related information (best score, best checkpoint path) and save the - best checkpoint into ``work_dir``. 
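[Editor's note] On the scheduling logic in _should_evaluate above: the start/interval arithmetic is easy to misread, so here is a minimal check of the documented "epochs 3, 5, 7..." schedule for start=3, interval=2. Only the start-is-set branch is modeled (epochs are 0-based inside the runner, so current + 1 is the human-readable epoch):

def should_eval(current, start=3, interval=2):
    if (current + 1) < start:
        return False  # start is larger than the current time
    return (current + 1 - start) % interval == 0

assert [e + 1 for e in range(8) if should_eval(e)] == [3, 5, 7]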
- """ - if self.by_epoch: - current = f'epoch_{runner.epoch + 1}' - cur_type, cur_time = 'epoch', runner.epoch + 1 - else: - current = f'iter_{runner.iter + 1}' - cur_type, cur_time = 'iter', runner.iter + 1 - - best_score = runner.meta['hook_msgs'].get( - 'best_score', self.init_value_map[self.rule]) - if self.compare_func(key_score, best_score): - best_score = key_score - runner.meta['hook_msgs']['best_score'] = best_score - - if self.best_ckpt_path and self.file_client.isfile( - self.best_ckpt_path): - self.file_client.remove(self.best_ckpt_path) - runner.logger.info( - (f'The previous best checkpoint {self.best_ckpt_path} was ' - 'removed')) - - best_ckpt_name = f'best_{self.key_indicator}_{current}.pth' - self.best_ckpt_path = self.file_client.join_path( - self.out_dir, best_ckpt_name) - runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path - - runner.save_checkpoint( - self.out_dir, best_ckpt_name, create_symlink=False) - runner.logger.info( - f'Now best checkpoint is saved as {best_ckpt_name}.') - runner.logger.info( - f'Best {self.key_indicator} is {best_score:0.4f} ' - f'at {cur_time} {cur_type}.') - - def evaluate(self, runner, results): - """Evaluate the results. - - Args: - runner (:obj:`mmcv.Runner`): The underlined training runner. - results (list): Output results. - """ - eval_res = self.dataloader.dataset.evaluate( - results, logger=runner.logger, **self.eval_kwargs) - - for name, val in eval_res.items(): - runner.log_buffer.output[name] = val - runner.log_buffer.ready = True - - if self.save_best is not None: - # If the performance of model is pool, the `eval_res` may be an - # empty dict and it will raise exception when `self.save_best` is - # not None. More details at - # https://github.com/open-mmlab/mmdetection/issues/6265. - if not eval_res: - warnings.warn( - 'Since `eval_res` is an empty dict, the behavior to save ' - 'the best checkpoint will be skipped in this evaluation.') - return None - - if self.key_indicator == 'auto': - # infer from eval_results - self._init_rule(self.rule, list(eval_res.keys())[0]) - return eval_res[self.key_indicator] - - return None - - -class DistEvalHook(EvalHook): - """Distributed evaluation hook. - - This hook will regularly perform evaluation in a given interval when - performing in distributed environment. - - Args: - dataloader (DataLoader): A PyTorch dataloader, whose dataset has - implemented ``evaluate`` function. - start (int | None, optional): Evaluation starting epoch. It enables - evaluation before the training starts if ``start`` <= the resuming - epoch. If None, whether to evaluate is merely decided by - ``interval``. Default: None. - interval (int): Evaluation interval. Default: 1. - by_epoch (bool): Determine perform evaluation by epoch or by iteration. - If set to True, it will perform by epoch. Otherwise, by iteration. - default: True. - save_best (str, optional): If a metric is specified, it would measure - the best checkpoint during evaluation. The information about best - checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep - best score value and best checkpoint path, which will be also - loaded when resume checkpoint. Options are the evaluation metrics - on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox - detection and instance segmentation. ``AR@100`` for proposal - recall. If ``save_best`` is ``auto``, the first key of the returned - ``OrderedDict`` result will be used. Default: None. - rule (str | None, optional): Comparison rule for best score. 
If set to - None, it will infer a reasonable rule. Keys such as 'acc', 'top' - .etc will be inferred by 'greater' rule. Keys contain 'loss' will - be inferred by 'less' rule. Options are 'greater', 'less', None. - Default: None. - test_fn (callable, optional): test a model with samples from a - dataloader in a multi-gpu manner, and return the test results. If - ``None``, the default test function ``mmcv.engine.multi_gpu_test`` - will be used. (default: ``None``) - tmpdir (str | None): Temporary directory to save the results of all - processes. Default: None. - gpu_collect (bool): Whether to use gpu or cpu to collect results. - Default: False. - broadcast_bn_buffer (bool): Whether to broadcast the - buffer(running_mean and running_var) of rank 0 to other rank - before evaluation. Default: True. - out_dir (str, optional): The root directory to save checkpoints. If not - specified, `runner.work_dir` will be used by default. If specified, - the `out_dir` will be the concatenation of `out_dir` and the last - level directory of `runner.work_dir`. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. Default: None. - **eval_kwargs: Evaluation arguments fed into the evaluate function of - the dataset. - """ - - def __init__(self, - dataloader, - start=None, - interval=1, - by_epoch=True, - save_best=None, - rule=None, - test_fn=None, - greater_keys=None, - less_keys=None, - broadcast_bn_buffer=True, - tmpdir=None, - gpu_collect=False, - out_dir=None, - file_client_args=None, - **eval_kwargs): - - if test_fn is None: - from annotator.uniformer.mmcv.engine import multi_gpu_test - test_fn = multi_gpu_test - - super().__init__( - dataloader, - start=start, - interval=interval, - by_epoch=by_epoch, - save_best=save_best, - rule=rule, - test_fn=test_fn, - greater_keys=greater_keys, - less_keys=less_keys, - out_dir=out_dir, - file_client_args=file_client_args, - **eval_kwargs) - - self.broadcast_bn_buffer = broadcast_bn_buffer - self.tmpdir = tmpdir - self.gpu_collect = gpu_collect - - def _do_evaluate(self, runner): - """perform evaluation and save ckpt.""" - # Synchronization of BatchNorm's buffer (running_mean - # and running_var) is not supported in the DDP of pytorch, - # which may cause the inconsistent performance of models in - # different ranks, so we broadcast BatchNorm's buffers - # of rank 0 to other ranks to avoid this. 
- if self.broadcast_bn_buffer: - model = runner.model - for name, module in model.named_modules(): - if isinstance(module, - _BatchNorm) and module.track_running_stats: - dist.broadcast(module.running_var, 0) - dist.broadcast(module.running_mean, 0) - - tmpdir = self.tmpdir - if tmpdir is None: - tmpdir = osp.join(runner.work_dir, '.eval_hook') - - results = self.test_fn( - runner.model, - self.dataloader, - tmpdir=tmpdir, - gpu_collect=self.gpu_collect) - if runner.rank == 0: - print('\n') - runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) - key_score = self.evaluate(runner, results) - # the key_score may be `None` so it needs to skip the action to - # save the best checkpoint - if self.save_best and key_score: - self._save_ckpt(runner, key_score) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py deleted file mode 100644 index 687cdc58c0336c92b1e4f9a410ba67ebaab2bc7a..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class DvcliveLoggerHook(LoggerHook): - """Class to log metrics with dvclive. - - It requires `dvclive`_ to be installed. - - Args: - path (str): Directory where dvclive will write TSV log files. - interval (int): Logging interval (every k iterations). - Default 10. - ignore_last (bool): Ignore the log of last iterations in each epoch - if less than `interval`. - Default: True. - reset_flag (bool): Whether to clear the output buffer after logging. - Default: True. - by_epoch (bool): Whether EpochBasedRunner is used. - Default: True. - - .. 
_dvclive: - https://dvc.org/doc/dvclive - """ - - def __init__(self, - path, - interval=10, - ignore_last=True, - reset_flag=True, - by_epoch=True): - - super(DvcliveLoggerHook, self).__init__(interval, ignore_last, - reset_flag, by_epoch) - self.path = path - self.import_dvclive() - - def import_dvclive(self): - try: - import dvclive - except ImportError: - raise ImportError( - 'Please run "pip install dvclive" to install dvclive') - self.dvclive = dvclive - - @master_only - def before_run(self, runner): - self.dvclive.init(self.path) - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner) - if tags: - for k, v in tags.items(): - self.dvclive.log(k, v, step=self.get_iter(runner)) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py deleted file mode 100644 index 398386a5b9f61c13be314e256e671a37d28e3623..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/request.py +++ /dev/null @@ -1,170 +0,0 @@ -from __future__ import absolute_import - -from .filepost import encode_multipart_formdata -from .packages.six.moves.urllib.parse import urlencode - -__all__ = ["RequestMethods"] - - -class RequestMethods(object): - """ - Convenience mixin for classes who implement a :meth:`urlopen` method, such - as :class:`urllib3.HTTPConnectionPool` and - :class:`urllib3.PoolManager`. - - Provides behavior for making common types of HTTP request methods and - decides which type of request field encoding to use. - - Specifically, - - :meth:`.request_encode_url` is for sending requests whose fields are - encoded in the URL (such as GET, HEAD, DELETE). - - :meth:`.request_encode_body` is for sending requests whose fields are - encoded in the *body* of the request using multipart or www-form-urlencoded - (such as for POST, PUT, PATCH). - - :meth:`.request` is for making any kind of request, it will look up the - appropriate encoding format and use one of the above two methods to make - the request. - - Initializer parameters: - - :param headers: - Headers to include with all requests, unless other headers are given - explicitly. - """ - - _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} - - def __init__(self, headers=None): - self.headers = headers or {} - - def urlopen( - self, - method, - url, - body=None, - headers=None, - encode_multipart=True, - multipart_boundary=None, - **kw - ): # Abstract - raise NotImplementedError( - "Classes extending RequestMethods must implement " - "their own ``urlopen`` method." - ) - - def request(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the appropriate encoding of - ``fields`` based on the ``method`` used. - - This is a convenience method that requires the least amount of manual - effort. 
It can be used in most situations, while still having the - option to drop down to more specific methods when necessary, such as - :meth:`request_encode_url`, :meth:`request_encode_body`, - or even the lowest level :meth:`urlopen`. - """ - method = method.upper() - - urlopen_kw["request_url"] = url - - if method in self._encode_url_methods: - return self.request_encode_url( - method, url, fields=fields, headers=headers, **urlopen_kw - ) - else: - return self.request_encode_body( - method, url, fields=fields, headers=headers, **urlopen_kw - ) - - def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the url. This is useful for request methods like GET, HEAD, DELETE, etc. - """ - if headers is None: - headers = self.headers - - extra_kw = {"headers": headers} - extra_kw.update(urlopen_kw) - - if fields: - url += "?" + urlencode(fields) - - return self.urlopen(method, url, **extra_kw) - - def request_encode_body( - self, - method, - url, - fields=None, - headers=None, - encode_multipart=True, - multipart_boundary=None, - **urlopen_kw - ): - """ - Make a request using :meth:`urlopen` with the ``fields`` encoded in - the body. This is useful for request methods like POST, PUT, PATCH, etc. - - When ``encode_multipart=True`` (default), then - :func:`urllib3.encode_multipart_formdata` is used to encode - the payload with the appropriate content type. Otherwise - :func:`urllib.parse.urlencode` is used with the - 'application/x-www-form-urlencoded' content type. - - Multipart encoding must be used when posting files, and it's reasonably - safe to use it in other times too. However, it may break request - signing, such as with OAuth. - - Supports an optional ``fields`` parameter of key/value strings AND - key/filetuple. A filetuple is a (filename, data, MIME type) tuple where - the MIME type is optional. For example:: - - fields = { - 'foo': 'bar', - 'fakefile': ('foofile.txt', 'contents of foofile'), - 'realfile': ('barfile.txt', open('realfile').read()), - 'typedfile': ('bazfile.bin', open('bazfile').read(), - 'image/jpeg'), - 'nonamefile': 'contents of nonamefile field', - } - - When uploading a file, providing a filename (the first parameter of the - tuple) is optional but recommended to best mimic behavior of browsers. - - Note that if ``headers`` are supplied, the 'Content-Type' header will - be overwritten because it depends on the dynamic random boundary string - which is used to compose the body of the request. The random boundary - string can be explicitly set with the ``multipart_boundary`` parameter. - """ - if headers is None: - headers = self.headers - - extra_kw = {"headers": {}} - - if fields: - if "body" in urlopen_kw: - raise TypeError( - "request got values for both 'fields' and 'body', can only specify one." 
- ) - - if encode_multipart: - body, content_type = encode_multipart_formdata( - fields, boundary=multipart_boundary - ) - else: - body, content_type = ( - urlencode(fields), - "application/x-www-form-urlencoded", - ) - - extra_kw["body"] = body - extra_kw["headers"] = {"Content-Type": content_type} - - extra_kw["headers"].update(headers) - extra_kw.update(urlopen_kw) - - return self.urlopen(method, url, **extra_kw) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/__init__.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/__init__.py deleted file mode 100644 index 25e5c94618a71cc584756ca2e17d6233a072dd87..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- - -try: - from caffe2.proto import caffe2_pb2 as _tmp - - # caffe2 is optional -except ImportError: - pass -else: - from .api import * - -from .flatten import TracingAdapter -from .torchscript import scripting_with_instances, dump_torchscript_IR - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/nms.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/nms.py deleted file mode 100644 index 6b6be71c7832d188aaa20bd7e1b16964cab7a731..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/nms.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import torch -from torchvision.ops import boxes as box_ops -from torchvision.ops import nms # noqa . for compatibility - - -def batched_nms( - boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float -): - """ - Same as torchvision.ops.boxes.batched_nms, but with float(). - """ - assert boxes.shape[-1] == 4 - # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311) - # to decide whether to use coordinate trick or for loop to implement batched_nms. So we - # just call it directly. - # Fp16 does not have enough range for batched NMS, so adding float(). - return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold) - - -# Note: this function (nms_rotated) might be moved into -# torchvision/ops/boxes.py in the future -def nms_rotated(boxes, scores, iou_threshold): - """ - Performs non-maximum suppression (NMS) on the rotated boxes according - to their intersection-over-union (IoU). - - Rotated NMS iteratively removes lower scoring rotated boxes which have an - IoU greater than iou_threshold with another (higher scoring) rotated box. - - Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as - RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they - can be representing completely different objects in certain tasks, e.g., OCR. - - As for the question of whether rotated-NMS should treat them as faraway boxes - even though their IOU is 1, it depends on the application and/or ground truth annotation. - - As an extreme example, consider a single character v and the square box around it. 
- - If the angle is 0 degree, the object (text) would be read as 'v'; - - If the angle is 90 degrees, the object (text) would become '>'; - - If the angle is 180 degrees, the object (text) would become '^'; - - If the angle is 270/-90 degrees, the object (text) would become '<' - - All of these cases have IoU of 1 to each other, and rotated NMS that only - uses IoU as criterion would only keep one of them with the highest score - - which, practically, still makes sense in most cases because typically - only one of theses orientations is the correct one. Also, it does not matter - as much if the box is only used to classify the object (instead of transcribing - them with a sequential OCR recognition model) later. - - On the other hand, when we use IoU to filter proposals that are close to the - ground truth during training, we should definitely take the angle into account if - we know the ground truth is labeled with the strictly correct orientation (as in, - upside-down words are annotated with -180 degrees even though they can be covered - with a 0/90/-90 degree box, etc.) - - The way the original dataset is annotated also matters. For example, if the dataset - is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, - we can estimate a minimum rotated bounding box to this polygon, but there's no way - we can tell the correct angle with 100% confidence (as shown above, there could be 4 different - rotated boxes, with angles differed by 90 degrees to each other, covering the exactly - same region). In that case we have to just use IoU to determine the box - proximity (as many detection benchmarks (even for text) do) unless there're other - assumptions we can make (like width is always larger than height, or the object is not - rotated by more than 90 degrees CCW/CW, etc.) - - In summary, not considering angles in rotated NMS seems to be a good option for now, - but we should be aware of its implications. - - Args: - boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in - (x_center, y_center, width, height, angle_degrees) format. - scores (Tensor[N]): Scores for each one of the rotated boxes - iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold - - Returns: - keep (Tensor): int64 tensor with the indices of the elements that have been kept - by Rotated NMS, sorted in decreasing order of scores - """ - return torch.ops.detectron2.nms_rotated(boxes, scores, iou_threshold) - - -# Note: this function (batched_nms_rotated) might be moved into -# torchvision/ops/boxes.py in the future -def batched_nms_rotated(boxes, scores, idxs, iou_threshold): - """ - Performs non-maximum suppression in a batched fashion. - - Each index value correspond to a category, and NMS - will not be applied between elements of different categories. - - Args: - boxes (Tensor[N, 5]): - boxes where NMS will be performed. They - are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format - scores (Tensor[N]): - scores for each one of the boxes - idxs (Tensor[N]): - indices of the categories for each one of the boxes. 
- iou_threshold (float): - discards all overlapping boxes - with IoU < iou_threshold - - Returns: - Tensor: - int64 tensor with the indices of the elements that have been kept - by NMS, sorted in decreasing order of scores - """ - assert boxes.shape[-1] == 5 - - if boxes.numel() == 0: - return torch.empty((0,), dtype=torch.int64, device=boxes.device) - boxes = boxes.float() # fp16 does not have enough range for batched NMS - # Strategy: in order to perform NMS independently per class, - # we add an offset to all the boxes. The offset is dependent - # only on the class idx, and is large enough so that boxes - # from different classes do not overlap - - # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, - # which won't handle negative coordinates correctly. - # Here by using min_coordinate we can make sure the negative coordinates are - # correctly handled. - max_coordinate = ( - torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 - ).max() - min_coordinate = ( - torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2 - ).min() - offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) - boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes - boxes_for_nms[:, :2] += offsets[:, None] - keep = nms_rotated(boxes_for_nms, scores, iou_threshold) - return keep diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/test_model_zoo.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/test_model_zoo.py deleted file mode 100644 index e3360a74864e0c00ed92ffbc8531c8d36e8be379..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/test_model_zoo.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
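The per-class offset trick used by `batched_nms_rotated` above can be tried in isolation. A minimal runnable sketch with made-up boxes (the values are illustrative, not from the original file):

```python
import torch

# Two heavily overlapping boxes that belong to different classes: the
# offset must keep them from suppressing each other in one NMS pass.
boxes = torch.tensor([[10., 10., 4., 4., 0.],
                      [11., 10., 4., 4., 0.]])  # (x_ctr, y_ctr, w, h, angle)
idxs = torch.tensor([0, 1])  # per-box class indices

# Same arithmetic as batched_nms_rotated: each offset is a multiple of the
# full coordinate span, so classes land in disjoint coordinate ranges.
max_coordinate = (torch.max(boxes[:, 0], boxes[:, 1]) +
                  torch.max(boxes[:, 2], boxes[:, 3]) / 2).max()
min_coordinate = (torch.min(boxes[:, 0], boxes[:, 1]) -
                  torch.max(boxes[:, 2], boxes[:, 3]) / 2).min()
offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)

boxes_for_nms = boxes.clone()  # avoid modifying the originals
boxes_for_nms[:, :2] += offsets[:, None]
print(boxes_for_nms)  # the class-1 box center is shifted far away
```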
-import logging -import unittest - -from detectron2 import model_zoo -from detectron2.config import instantiate -from detectron2.modeling import FPN, GeneralizedRCNN - -logger = logging.getLogger(__name__) - - -class TestModelZoo(unittest.TestCase): - def test_get_returns_model(self): - model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False) - self.assertIsInstance(model, GeneralizedRCNN) - self.assertIsInstance(model.backbone, FPN) - - def test_get_invalid_model(self): - self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml") - - def test_get_url(self): - url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml") - self.assertEqual( - url, - "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa - ) - url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py") - self.assertEqual(url, url2) - - def _build_lazy_model(self, name): - cfg = model_zoo.get_config("common/models/" + name) - instantiate(cfg.model) - - def test_mask_rcnn_fpn(self): - self._build_lazy_model("mask_rcnn_fpn.py") - - def test_mask_rcnn_c4(self): - self._build_lazy_model("mask_rcnn_c4.py") - - def test_panoptic_fpn(self): - self._build_lazy_model("panoptic_fpn.py") - - def test_schedule(self): - cfg = model_zoo.get_config("common/coco_schedule.py") - for _, v in cfg.items(): - instantiate(v) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Tirendaz/background-remover/app.py b/spaces/Tirendaz/background-remover/app.py deleted file mode 100644 index c92b1d8fac26dc50b5fa4619c80e4e7c4bc036a1..0000000000000000000000000000000000000000 --- a/spaces/Tirendaz/background-remover/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import gradio as gr -import cv2 -import torch -import numpy as np -from torchvision import transforms - -title = "Background Remover" -description = "Automatically remove the image background from a profile photo." -article = "
    Blog | Github Repo
    " - - -def make_transparent_foreground(pic, mask): - # split the image into channels - b, g, r = cv2.split(np.array(pic).astype('uint8')) - # add an alpha channel with and fill all with transparent pixels (max 255) - a = np.ones(mask.shape, dtype='uint8') * 255 - # merge the alpha channel back - alpha_im = cv2.merge([b, g, r, a], 4) - # create a transparent background - bg = np.zeros(alpha_im.shape) - # setup the new mask - new_mask = np.stack([mask, mask, mask, mask], axis=2) - # copy only the foreground color pixels from the original image where mask is set - foreground = np.where(new_mask, alpha_im, bg).astype(np.uint8) - - return foreground - - -def remove_background(input_image): - preprocess = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ]) - - input_tensor = preprocess(input_image) - input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model - - # move the input and model to GPU for speed if available - if torch.cuda.is_available(): - input_batch = input_batch.to('cuda') - model.to('cuda') - - with torch.no_grad(): - output = model(input_batch)['out'][0] - output_predictions = output.argmax(0) - - # create a binary (black and white) mask of the profile foreground - mask = output_predictions.byte().cpu().numpy() - background = np.zeros(mask.shape) - bin_mask = np.where(mask, 255, background).astype(np.uint8) - - foreground = make_transparent_foreground(input_image, bin_mask) - - return foreground, bin_mask - - -def inference(img): - foreground, _ = remove_background(img) - return foreground - - -model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True) -model.eval() - -gr.Interface( - inference, - gr.inputs.Image(type="pil", label="Input"), - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[['woman.jpg'], ['man.jpg']], - enable_queue=True -).launch(debug=False) \ No newline at end of file diff --git a/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/clustering.md b/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/clustering.md deleted file mode 100644 index 42523d65e1ea3ae68e7659261f1b206ff3443516..0000000000000000000000000000000000000000 --- a/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/clustering.md +++ /dev/null @@ -1,5 +0,0 @@ -#### Songs Lyrics Clustering - -Based on the data and SBERT model called **"all-distilroberta-v1"**. This model is a pre-trained model based on various selections of the trained model corpus. The reason that we chose this model is due to its size of the model and performance. The model size is almost half that, 290MB, of the 1st and 2nd best-performing pre-trained models. Once we get the embedding of the lyrics from the sample songs that we sampled from the dataset, we then performed analysis on what is the best number of clusters using elbow plot. From the elbow plot below, we can see that the best number of clusters using the embedding of the songs is 7 clusters. We then perform the clustering on our dataset using MiniBatchKmean to help with the speed of the clustering considering the embedding size and samples. - -Finally, we can observe from the results of the clustering of the lyrics line that it clustered quite well and users can play with the interactive plot of the cluster on the main app screen. 
From this cluster, however, we discover that there are multiple lines with empty lyrics and some lines which contain the lyrics partition such as ***Chorus*** or ***Acoustic***, while some contain other song attributes. This means we need to clean up the lyrics line much more to make sure that the cluster is only representing important lyrics lines that matter. This, however, does not interfere with the recommendation ranking since the ranking is based on the overall song similarity based on all the lines in the song while suppressed scores for lines that are less similar to the query than the rest. \ No newline at end of file diff --git a/spaces/VIPLab/Caption-Anything/README.md b/spaces/VIPLab/Caption-Anything/README.md deleted file mode 100644 index 170c7a1568a08bcd8131ebb66ba1c0f86e138fbd..0000000000000000000000000000000000000000 --- a/spaces/VIPLab/Caption-Anything/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Caption Anything -emoji: 📚 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.26.0 -python_version: 3.8.9 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: TencentARC/Caption-Anything ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Wewordle.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Wewordle.py deleted file mode 100644 index 18cfe8c58f3c5066fe9f5633b2d284532d70c4f4..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/Wewordle.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -import requests -import json -import random -import time -import string -from ...typing import sha256, Dict, get_type_hints - -url = "https://wewordle.org/gptapi/v1/android/turbo" -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - base = '' - for message in messages: - base += '%s: %s\n' % (message['role'], message['content']) - base += 'assistant:' - # randomize user id and app id - _user_id = ''.join(random.choices( - f'{string.ascii_lowercase}{string.digits}', k=16)) - _app_id = ''.join(random.choices( - f'{string.ascii_lowercase}{string.digits}', k=31)) - # make current date with format utc - _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) - headers = { - 'accept': '*/*', - 'pragma': 'no-cache', - 'Content-Type': 'application/json', - 'Connection': 'keep-alive' - } - data = { - "user": _user_id, - "messages": [ - {"role": "user", "content": base} - ], - "subscriber": { - "originalPurchaseDate": None, - "originalApplicationVersion": None, - "allPurchaseDatesMillis": {}, - "entitlements": { - "active": {}, - "all": {} - }, - "allPurchaseDates": {}, - "allExpirationDatesMillis": {}, - "allExpirationDates": {}, - "originalAppUserId": f"$RCAnonymousID:{_app_id}", - "latestExpirationDate": None, - "requestDate": _request_date, - "latestExpirationDateMillis": None, - "nonSubscriptionTransactions": [], - "originalPurchaseDateMillis": None, - "managementURL": None, - "allPurchasedProductIdentifiers": [], - "firstSeen": _request_date, - "activeSubscriptions": [] - } - } - response = requests.post(url, headers=headers, data=json.dumps(data)) - if response.status_code == 200: - _json = response.json() - if 'message' in _json: - yield _json['message']['content'] - else: - print(f"Error Occurred::{response.status_code}") - return None - - -params = 
f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/Vynock/rvc-wefu/vc_infer_pipeline.py b/spaces/Vynock/rvc-wefu/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/Vynock/rvc-wefu/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": 
feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 
== 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/Wanlau/sovits-4.0_datealive/onnx_export.py b/spaces/Wanlau/sovits-4.0_datealive/onnx_export.py deleted file mode 100644 index 53b278dd9495550cd48dbddb92435b29b5bb2e6a..0000000000000000000000000000000000000000 --- a/spaces/Wanlau/sovits-4.0_datealive/onnx_export.py +++ /dev/null @@ -1,94 +0,0 @@ -import torch -from torchaudio.models.wav2vec2.utils import import_fairseq_model -from fairseq import checkpoint_utils -from onnxexport.model_onnx import SynthesizerTrn -import utils - -def get_hubert_model(): - vec_path = "hubert/checkpoint_best_legacy_500.pt" - print("load model(s) from {}".format(vec_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - model = models[0] - model.eval() - return model - - -def main(HubertExport, NetExport): - path = "SoVits4.0" - - '''if HubertExport: - device = torch.device("cpu") - vec_path = "hubert/checkpoint_best_legacy_500.pt" - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - original = models[0] - original.eval() - model = original - test_input = torch.rand(1, 1, 16000) - model(test_input) - torch.onnx.export(model, - test_input, - "hubert4.0.onnx", - export_params=True, - opset_version=16, - do_constant_folding=True, - input_names=['source'], - output_names=['embed'], - dynamic_axes={ - 'source': - { - 2: "sample_length" - }, - } - )''' - if NetExport: - device = torch.device("cpu") - hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - test_hidden_unit = torch.rand(1, 10, 256) - test_pitch = torch.rand(1, 10) - test_mel2ph = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).unsqueeze(0) - test_uv = torch.ones(1, 10, dtype=torch.float32) - test_noise = torch.randn(1, 192, 10) - test_sid = torch.LongTensor([0]) - input_names = ["c", "f0", "mel2ph", "uv", "noise", "sid"] - output_names = ["audio", ] - SVCVITS.eval() - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_pitch.to(device), - test_mel2ph.to(device), - test_uv.to(device), - 
test_noise.to(device), - test_sid.to(device) - ), - f"checkpoints/{path}/model.onnx", - dynamic_axes={ - "c": [0, 1], - "f0": [1], - "mel2ph": [1], - "uv": [1], - "noise": [2], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - - -if __name__ == '__main__': - main(False, True) diff --git "a/spaces/Wazzzabeee/image-video-colorization/pages/02_\360\237\216\245_Input_Youtube_Link.py" "b/spaces/Wazzzabeee/image-video-colorization/pages/02_\360\237\216\245_Input_Youtube_Link.py" deleted file mode 100644 index fae44f01f84446647480ad26c63e60b66f4b29a8..0000000000000000000000000000000000000000 --- "a/spaces/Wazzzabeee/image-video-colorization/pages/02_\360\237\216\245_Input_Youtube_Link.py" +++ /dev/null @@ -1,138 +0,0 @@ -import time - -import cv2 -import moviepy.editor as mp -import numpy as np -import streamlit as st -from pytube import YouTube -from streamlit_lottie import st_lottie -from tqdm import tqdm - -from models.deep_colorization.colorizers import eccv16 -from utils import colorize_frame, format_time -from utils import load_lottieurl, change_model - -st.set_page_config(page_title="Image & Video Colorizer", page_icon="🎨", layout="wide") - - -loaded_model = eccv16(pretrained=True).eval() -current_model = "None" - - -col1, col2 = st.columns([1, 3]) -with col1: - lottie = load_lottieurl("https://assets5.lottiefiles.com/packages/lf20_RHdEuzVfEL.json") - st_lottie(lottie) - -with col2: - st.write(""" - ## B&W Videos Colorizer - ##### Input a YouTube black and white video link and get a colorized version of it. - ###### ➠ This space is using CPU Basic so it might take a while to colorize a video. - ###### ➠ If you want more models and GPU available please support this space by donating.""") - - -@st.cache_data() -def download_video(link): - yt = YouTube(link) - video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(filename="video.mp4") - return video - - -def main(): - model = st.selectbox( - "Select Model (Both models have their pros and cons, I recommend trying both and keeping the best for you task)", - ["ECCV16", "SIGGRAPH17"], index=0) - - loaded_model = change_model(current_model, model) - st.write(f"Model is now {model}") - - link = st.text_input("YouTube Link (The longer the video, the longer the processing time)") - if st.button("Colorize"): - yt_video = download_video(link) - print(yt_video) - col1, col2 = st.columns([0.5, 0.5]) - with col1: - st.markdown('
    Before
    ', unsafe_allow_html=True) - st.video(yt_video) - with col2: - st.markdown('
    After
    ', unsafe_allow_html=True) - with st.spinner("Colorizing frames..."): - # Colorize video frames and store in a list - output_frames = [] - - audio = mp.AudioFileClip("video.mp4") - video = cv2.VideoCapture("video.mp4") - - total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - fps = video.get(cv2.CAP_PROP_FPS) - - progress_bar = st.progress(0) # Create a progress bar - start_time = time.time() - time_text = st.text("Time Remaining: ") # Initialize text value - - for _ in tqdm(range(total_frames), unit='frame', desc="Progress"): - ret, frame = video.read() - if not ret: - break - - colorized_frame = colorize_frame(frame, loaded_model) - output_frames.append((colorized_frame * 255).astype(np.uint8)) - - elapsed_time = time.time() - start_time - frames_completed = len(output_frames) - frames_remaining = total_frames - frames_completed - time_remaining = (frames_remaining / frames_completed) * elapsed_time - - progress_bar.progress(frames_completed / total_frames) # Update progress bar - - if frames_completed < total_frames: - time_text.text(f"Time Remaining: {format_time(time_remaining)}") # Update text value - else: - time_text.empty() # Remove text value - progress_bar.empty() - - with st.spinner("Merging frames to video..."): - frame_size = output_frames[0].shape[:2] - output_filename = "output.mp4" - fourcc = cv2.VideoWriter_fourcc(*"mp4v") # Codec for MP4 video - out = cv2.VideoWriter(output_filename, fourcc, fps, (frame_size[1], frame_size[0])) - - # Display the colorized video using st.video - for frame in output_frames: - frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - - out.write(frame_bgr) - - out.release() - - # Convert the output video to a format compatible with Streamlit - converted_filename = "converted_output.mp4" - clip = mp.VideoFileClip(output_filename) - clip = clip.set_audio(audio) - - clip.write_videofile(converted_filename, codec="libx264") - - # Display the converted video using st.video() - st.video(converted_filename) - st.balloons() - - # Add a download button for the colorized video - st.download_button( - label="Download Colorized Video", - data=open(converted_filename, "rb").read(), - file_name="colorized_video.mp4" - ) - - # Close and delete the temporary file after processing - video.release() - - -if __name__ == "__main__": - main() - st.markdown( - "###### Made with :heart: by [Clément Delteil](https://www.linkedin.com/in/clementdelteil/) [![this is an " - "image link](https://i.imgur.com/thJhzOO.png)](https://www.buymeacoffee.com/clementdelteil)") - st.markdown( - "###### [Blog post of the project](https://medium.com/geekculture/creating-a-web-app-to-colorize-images-and-youtube-videos-80f5be2d0f68)" - ) diff --git a/spaces/XzJosh/Lumi-Bert-VITS2/text/chinese_bert.py b/spaces/XzJosh/Lumi-Bert-VITS2/text/chinese_bert.py deleted file mode 100644 index cb84ce0b426cd0a1c7954ddcdf41322c10ed14fa..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Lumi-Bert-VITS2/text/chinese_bert.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large") -model = AutoModelForMaskedLM.from_pretrained("./bert/chinese-roberta-wwm-ext-large").to(device) - -def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors='pt') - for i in inputs: - inputs[i] = inputs[i].to(device) - res = model(**inputs, 
output_hidden_states=True) - res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu() - - assert len(word2ph) == len(text)+2 - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - - return phone_level_feature.T - -if __name__ == '__main__': - # feature = get_bert_feature('你好,我是说的道理。') - import torch - - word_level_feature = torch.rand(38, 1024) # 12个词,每个词1024维特征 - word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1] - - # 计算总帧数 - total_frames = sum(word2phone) - print(word_level_feature.shape) - print(word2phone) - phone_level_feature = [] - for i in range(len(word2phone)): - print(word_level_feature[i].shape) - - # 对每个词重复word2phone[i]次 - repeat_feature = word_level_feature[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - print(phone_level_feature.shape) # torch.Size([36, 1024]) - diff --git a/spaces/YONG627/456123/yolov5-code-main/benchmarks.py b/spaces/YONG627/456123/yolov5-code-main/benchmarks.py deleted file mode 100644 index 09108b8a7cc4ef07c97e3835d428bba106b71629..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/benchmarks.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 benchmarks on all supported export formats - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT - -Usage: - $ python benchmarks.py --weights yolov5s.pt --img 640 -""" - -import argparse -import platform -import sys -import time -from pathlib import Path - -import pandas as pd - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -# ROOT = ROOT.relative_to(Path.cwd()) # relative - -import export -from models.experimental import attempt_load -from models.yolo import SegmentationModel -from segment.val import run as val_seg -from utils import notebook_init -from utils.general import LOGGER, check_yaml, file_size, print_args -from utils.torch_utils import select_device -from val import run as val_det - - -def run( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure -): - y, t = [], time.time() - device = select_device(device) - model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. - for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) - try: - assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported - assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML - if 'cpu' in device.type: - assert cpu, 'inference not supported on CPU' - if 'cuda' in device.type: - assert gpu, 'inference not supported on GPU' - - # Export - if f == '-': - w = weights # PyTorch format - else: - w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others - assert suffix in str(w), 'export failed' - - # Validate - if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) - metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) - else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) - metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) - speed = result[2][1] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference - except Exception as e: - if hard_fail: - assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') - y.append([name, None, None, None]) # mAP, t_inference - if pt_only and i == 0: - break # break after PyTorch - - # Print results - LOGGER.info('\n') - parse_opt() - notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] - py = pd.DataFrame(y, columns=c) - LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') - LOGGER.info(str(py if map else py.iloc[:, :2])) - if hard_fail and isinstance(hard_fail, str): - metrics = py['mAP50-95'].array # values to compare to floor - floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n - assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' - return py - - -def test( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure -): - y, t = [], time.time() - device = select_device(device) - for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) - try: - w = weights if f == '-' else \ - export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights - assert suffix in str(w), 'export failed' - y.append([name, True]) - except Exception: - y.append([name, False]) # mAP, t_inference - - # Print results - LOGGER.info('\n') - parse_opt() - notebook_init() # print system info - py = pd.DataFrame(y, columns=['Format', 'Export']) - LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') - LOGGER.info(str(py)) - return py - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--test', action='store_true', help='test exports only') - parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') - parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') - opt = parser.parse_args() - opt.data = check_yaml(opt.data) # check YAML - print_args(vars(opt)) - return opt - - -def main(opt): - test(**vars(opt)) if opt.test else run(**vars(opt)) - - -if __name__ == '__main__': - opt = parse_opt() - main(opt) diff --git a/spaces/Yah216/Arabic-Sentiment-Analyser/app.py b/spaces/Yah216/Arabic-Sentiment-Analyser/app.py deleted file mode 100644 index 36647ad56d708d204c30606590090b978bed95eb..0000000000000000000000000000000000000000 --- a/spaces/Yah216/Arabic-Sentiment-Analyser/app.py +++ /dev/null @@ -1,12 +0,0 @@ -import streamlit as st -import tensorflow as tf - -from transformers import pipeline - -model_ckpt = "Yah216/Sentiment_Analysis_CAMelBERT_msa_sixteenth_HARD" - -pipe = pipeline("text-classification", model_ckpt) - -text = st.text_area("Enter some text in arabic language!") -if text: - st.write(pipe(text)[0]["label"]) \ No newline at end of file diff --git a/spaces/YlcldKlns/bing/src/lib/utils.ts b/spaces/YlcldKlns/bing/src/lib/utils.ts deleted file mode 100644 index 8de2eba94bf0bc93579d4f489e8b810dbf6ce92a..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/lib/utils.ts +++ /dev/null @@ -1,159 +0,0 @@ -import { clsx, type ClassValue } from 'clsx' -import { customAlphabet } from 'nanoid' -import { twMerge } from 'tailwind-merge' -// @ts-ignore -import randomip from 'random-ip' -import cidr from './cidr.json' - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} - -export const nanoid = customAlphabet( - '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', - 7 -) // 7-character random string - -export function createChunkDecoder() { - const decoder = new TextDecoder() - return function (chunk: Uint8Array | undefined): 
string { - if (!chunk) return '' - return decoder.decode(chunk, { stream: true }) - } -} - -export function random (start: number, end: number) { - return start + Math.floor(Math.random() * (end - start)) -} - -export function randomIP() { - // return `104.${random(0, 21)}.${random(0, 127)}.${random(1, 255)}` - const [ip, range] = cidr.at(random(0, cidr.length))?.split('/')! - return randomip(ip, range) -} - -export const defaultUID = 'xxx' - -export function parseHeadersFromCurl(content: string) { - const re = /-H '([^:]+):\s*([^']+)/mg - const headers: HeadersInit = {} - content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // 将 cmd curl 转成 bash curl - content.replace(re, (_: string, key: string, value: string) => { - headers[key] = value - return '' - }) - return headers -} - -export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2'] -export function encodeHeadersToCookie(content: string) { - const base64Content = btoa(content) - const contentChunks = base64Content.match(/.{1,4000}/g) || [] - return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`) -} - -export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) { - let base64Content = '' - ChunkKeys.forEach((key) => { - base64Content += (cookies[key] || '') - }) - try { - return atob(base64Content) - } catch(e) { - return '' - } -} - -export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) { - return parseHeadersFromCurl(extraCurlFromCookie(cookies)) -} - -export function formatDate(input: string | number | Date): string { - const date = new Date(input) - return date.toLocaleDateString('en-US', { - month: 'long', - day: 'numeric', - year: 'numeric' - }) -} - -export function parseCookie(cookie: string, cookieName: string) { - const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie - return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : '' -} - -export function setCookie(key: string, value: string) { - const maxAge = value ? 86400 * 30 : 0 - document.cookie = `${key}=${value || ''}; Path=/; Max-Age=${maxAge}; SameSite=None; Secure` -} - -export function getCookie(cookieName: string) { - const re = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`) - return re.test(document.cookie) ? RegExp.$1 : '' -} - -export function parseCookies(cookie: string, cookieNames: string[]) { - const cookies: { [key: string]: string } = {} - cookieNames.forEach(cookieName => { - cookies[cookieName] = parseCookie(cookie, cookieName) - }) - return cookies -} - -export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0' - -export function parseUA(ua?: string, default_ua = DEFAULT_UA) { - return / EDGE?/i.test(decodeURIComponent(ua || '')) ? 
decodeURIComponent(ua!.trim()) : default_ua -} - -export function mockUser(cookies: Partial<{ [key: string]: string }>) { - const { - BING_UA = process.env.BING_UA, - BING_IP, - _U = defaultUID, - } = cookies - const ua = parseUA(BING_UA) - - return { - 'x-forwarded-for': BING_IP!, - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', - 'User-Agent': ua!, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.3 OS/Win32', - cookie: `_U=${_U}`, - } -} - -export function createHeaders(cookies: Partial<{ [key: string]: string }>, type?: string) { - let { - BING_HEADER = process.env.BING_HEADER, - BING_IP, - IMAGE_ONLY = process.env.IMAGE_ONLY ?? '1', - } = cookies - const imageOnly = /^(1|true|yes)$/.test(String(IMAGE_ONLY)) - if (BING_HEADER) { - if ( - (imageOnly && type === 'image') - || !imageOnly - ) { - const headers = extraHeadersFromCookie({ - BING_HEADER, - ...cookies, - }) || {} - headers['x-forwarded-for'] = BING_IP! - return headers - } - } - return mockUser(cookies) -} - -export class WatchDog { - private tid = 0 - watch(fn: Function, timeout = 2000) { - clearTimeout(this.tid) - this.tid = setTimeout(fn, timeout + Math.random() * 1000) - } - reset() { - clearTimeout(this.tid) - } -} diff --git a/spaces/YlcldKlns/bing/tests/parse.ts b/spaces/YlcldKlns/bing/tests/parse.ts deleted file mode 100644 index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/tests/parse.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { promises as fs } from 'fs' -import { join } from 'path' -import { parseHeadersFromCurl } from '@/lib/utils' - -(async () => { - const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8') - const headers = parseHeadersFromCurl(content) - console.log(headers) - - const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8') - const cmdHeaders = parseHeadersFromCurl(cmdContent) - console.log(cmdHeaders) -})() diff --git a/spaces/Yram/Docker/Dockerfile b/spaces/Yram/Docker/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Yram/Docker/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ -    apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/YuAnthony/Audio-Caption/coco_caption/get_stanford_models.sh b/spaces/YuAnthony/Audio-Caption/coco_caption/get_stanford_models.sh deleted file mode 100644 index 50876780558a9b818a7c8187c75cfd49a2264811..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/coco_caption/get_stanford_models.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env sh -# This script downloads the Stanford CoreNLP models. - -CORENLP=stanford-corenlp-full-2015-12-09 -SPICELIB=pycocoevalcap/spice/lib -JAR=stanford-corenlp-3.6.0 - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -if [ -f $SPICELIB/$JAR.jar ]; then - echo "Found Stanford CoreNLP." -else - echo "Downloading..." - wget http://nlp.stanford.edu/software/$CORENLP.zip - echo "Unzipping..." 
- unzip $CORENLP.zip -d $SPICELIB/ - mv $SPICELIB/$CORENLP/$JAR.jar $SPICELIB/ - mv $SPICELIB/$CORENLP/$JAR-models.jar $SPICELIB/ - rm -f $CORENLP.zip - rm -rf $SPICELIB/$CORENLP/ - echo "Done." -fi diff --git a/spaces/ZenXir/FreeVC/speaker_encoder/data_objects/speaker_batch.py b/spaces/ZenXir/FreeVC/speaker_encoder/data_objects/speaker_batch.py deleted file mode 100644 index 4485605e3ece5b491d1e7d0f223c543b6c91eb96..0000000000000000000000000000000000000000 --- a/spaces/ZenXir/FreeVC/speaker_encoder/data_objects/speaker_batch.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from typing import List -from speaker_encoder.data_objects.speaker import Speaker - -class SpeakerBatch: - def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): - self.speakers = speakers - self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} - - # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with - # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) - self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) diff --git a/spaces/abdvl/datahub_qa_bot/docs/how/search.md b/spaces/abdvl/datahub_qa_bot/docs/how/search.md deleted file mode 100644 index ba1cdaf8b5e1965748ff807b3eeb5f034f153490..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/how/search.md +++ /dev/null @@ -1,291 +0,0 @@ -import FeatureAvailability from '@site/src/components/FeatureAvailability'; - -# About DataHub Search - - - - - - - -The **search bar** is an important mechanism for discovering data assets in DataHub. From the search bar, you can find Datasets, Columns, Dashboards, Charts, Data Pipelines, and more. Simply type in a term and press 'enter'. - -
- -**Advanced queries** and the **filter sidebar** help fine-tune queries. For programmatic users, DataHub provides a **GraphQL API** as well. - -## Search Setup, Prerequisites, and Permissions - -Search is available for all users. Although Search works out of the box, the more relevant data you ingest, the better the results are. - -## Using Search - -Searching is as easy as typing in relevant business terms and pressing 'enter' to view matching data assets. - -By default, search terms will match against different aspects of a data asset. This includes asset names, descriptions, tags, terms, owners, and even specific attributes like the names of columns in a table. - - -### Filters - -The filters sidebar sits on the left-hand side of search results, and lets users find assets by drilling down. You can quickly filter by Data Platform (e.g. Snowflake), Tags, Glossary Terms, Domain, Owners, and more with a single click. - -
- -### Advanced Filters - -Using the Advanced Filter view, you can apply more complex filters. To get there, click 'Advanced' in the top right of the filter panel. - -
    - -#### Adding an Advanced Filter - -Currently, Advanced Filters support filtering by Column Name, Container, Domain, Description (entity or column level), Tag (entity or column level), Glossary Term (entity or column level), Owner, Entity Type, Subtype, Environment and soft-deleted status. - -To add a new filter, click the add filter menu, choose a filter type, and then fill in the values you want to filter by. - -
    - -#### Matching Any Advanced Filter - -By default, all filters must be matched in order for a result to appear. For example, if you add a tag filter and a platform filter, all results will have the tag and the platform. You can set the results to match any filter instead. Click on `all filters` and select `any filter` from the drop-down menu. - -
    - -#### Negating An Advanced Filter - -After creating a filter, you can choose whether results should or should not match it. Change this by clicking the operation in the top right of the filter and selecting the negated operation. - -
- - -### Results - -Search results appear ranked by their relevance. In self-hosted DataHub, ranking is based on how closely the query matches the textual fields of an asset and its metadata. In Managed DataHub, ranking is based on a combination of textual relevance, usage (queries / views), and change frequency. - -With better metadata comes better results. Learn more about ingesting technical metadata in the [metadata ingestion](../../metadata-ingestion/README.md) guide. - -### Advanced queries - -The search bar supports advanced queries with pattern matching, logical expressions, and filtering by specific field matches. - -The following are use cases with example search phrases. Additionally, an example link is provided for our demo instance. -These examples are non-exhaustive and use Datasets as a reference. - -If you want to: - -- Exact match on term or phrase - - ```"datahub_schema"``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%22datahub_schema%22) - - ```datahub_schema``` [Sample results](https://demo.datahubproject.io/search?page=1&query=datahub_schema) - - Enclosing one or more terms with double quotes will enforce exact matching on these terms, preventing further tokenization. - -- Exclude terms - - ```logging -snowflake``` [Sample results](https://demo.datahubproject.io/search?page=1&query=logging%20-snowflake) - - Results can be excluded by term using `-` to negate the term. - -- Term boolean logic with precedence - - ```logging + (-snowflake | os_audit_log)``` [Sample results](https://demo.datahubproject.io/search?page=1&query=logging%20%2B%20%28-snowflake%20%7C%20os_audit_log%29) - - `(` `)` can be used to set the precedence of boolean term expressions - -- Find a dataset with the word **mask** in the name: - - ```/q name: *mask*``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20name%253A%2520%2Amask%2A) - - This will return entities with **mask** in the name. Names tend to be connected by other symbols, hence the wildcard symbols before and after the word. - -- Find a dataset with a property, **encoding** - - ```/q customProperties: encoding*``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20customProperties%3A%20encoding%2A) - - Dataset properties are indexed in Elasticsearch in the manner of key=value, so if you know the precise key-value pair, you can search using ```"key=value"```. However, if you only know the key, you can use wildcards to replace the value, which is what is being done here. - -- Find a dataset with a column name, **latitude** - - ```/q fieldPaths: latitude``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20fieldPaths%3A%20latitude) - - fieldPaths is the name of the attribute that holds the column name in Datasets. - -- Find a dataset with the term **latitude** in the field description - - ```/q editedFieldDescriptions: latitude OR fieldDescriptions: latitude``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20editedFieldDescriptions%3A%20latitude%20OR%20fieldDescriptions%3A%20latitude) - - Datasets have two attributes that contain field descriptions: fieldDescriptions comes from the SchemaMetadata aspect, while editedFieldDescriptions comes from the EditableSchemaMetadata aspect. EditableSchemaMetadata holds information that comes from UI edits, while SchemaMetadata holds data from ingestion of the dataset.
- -- Find a dataset with the term **logical** in the dataset description - - ```/q editedDescription: *logical* OR description: *logical*``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20editedDescription%3A%20%2Alogical%2A%20OR%20description%3A%20%2Alogical%2A) - - Similar to field descriptions, dataset descriptions can be found in two aspects, hence the need to search two attributes. - -- Find a dataset which resides in one of the browsing folders, for instance, the **hive** folder - - ```/q browsePaths: *hive*``` [Sample results](https://demo.datahubproject.io/search?page=1&query=%2Fq%20browsePaths%3A%20%2Ahive%2A) - - BrowsePath is stored as a complete string, for instance ```/datasets/prod/hive/SampleKafkaDataset```, hence the need for wildcards on both ends of the term to return a result. - -- Find a dataset without the **name** field - - ```/q -_exists_:name``` [Sample results](https://demo.datahubproject.io/search?filter_entity___false___EQUAL___0=DATASET&page=1&query=%252Fq%2520-_exists_%253Aname&unionType=0) - - The `-` negates the existence of the **name** field. - -
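-For programmatic use cases, the same searches can be issued over HTTP against the GraphQL API (the **GraphQL** section below shows the underlying query). The sketch below is a minimal, unofficial example: it assumes a DataHub instance reachable at `DATAHUB_URL` that exposes the GraphQL endpoint at `/api/graphql`, and an optional personal access token; adjust both for your deployment. -
-```
-# Minimal sketch: programmatic search via the DataHub GraphQL API.
-# Assumptions: DATAHUB_URL points at your instance; TOKEN is a personal
-# access token, or None if authentication is disabled.
-import requests
-
-DATAHUB_URL = "http://localhost:9002"
-TOKEN = None
-
-QUERY = """
-{
-  searchAcrossEntities(
-    input: {types: [], query: "*", start: 0, count: 10,
-            filters: [{field: "fieldTags", value: "urn:li:tag:Dimension"}]}
-  ) {
-    total
-    searchResults { entity { type ... on Dataset { urn name } } }
-  }
-}
-"""
-
-headers = {"Content-Type": "application/json"}
-if TOKEN:
-    headers["Authorization"] = f"Bearer {TOKEN}"
-
-response = requests.post(f"{DATAHUB_URL}/api/graphql", json={"query": QUERY}, headers=headers)
-response.raise_for_status()
-for result in response.json()["data"]["searchAcrossEntities"]["searchResults"]:
-    print(result["entity"])
-```
-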
- -### Videos - -**What can you do with DataHub?** - -
- - -### GraphQL - -* [searchAcrossEntities](https://datahubproject.io/docs/graphql/queries/#searchacrossentities) -* You can try out the API on the demo instance's public GraphQL interface: [here](https://demo.datahubproject.io/api/graphiql) - -The same GraphQL API that powers the Search UI can be used -for integrations and programmatic use-cases. - -``` -# Example query -{ - searchAcrossEntities( - input: {types: [], query: "*", start: 0, count: 10, filters: [{field: "fieldTags", value: "urn:li:tag:Dimension"}]} - ) { - start - count - total - searchResults { - entity { - type - ... on Dataset { - urn - type - platform { - name - } - name - } - } - } - } -} -``` - - -### DataHub Blog -* [Using DataHub for Search & Discovery](https://blog.datahubproject.io/using-datahub-for-search-discovery-fa309089be22) - -## FAQ and Troubleshooting - -**How are the results ordered?** - -The order of the search results is based on the weight DataHub gives them in its search algorithm. The current algorithm in OSS DataHub is based on a text-match score from Elasticsearch. - -**Where to find more information?** - -The sample queries here are non-exhaustive. [The link here](https://demo.datahubproject.io/tag/urn:li:tag:Searchable) shows the current list of indexed fields for each entity inside DataHub. Click on the fields inside each entity and see which field has the tag ```Searchable```. -However, it does not tell you the specific attribute name to use for specialized searches. One way to do so is to inspect the Elasticsearch indices, for example: -`curl http://localhost:9200/_cat/indices` returns all the ES indices in the Elasticsearch container. - -``` -yellow open chartindex_v2_1643510690325 bQO_RSiCSUiKJYsmJClsew 1 1 2 0 8.5kb 8.5kb -yellow open mlmodelgroupindex_v2_1643510678529 OjIy0wb7RyKqLz3uTENRHQ 1 1 0 0 208b 208b -yellow open dataprocessindex_v2_1643510676831 2w-IHpuiTUCs6e6gumpYHA 1 1 0 0 208b 208b -yellow open corpgroupindex_v2_1643510673894 O7myCFlqQWKNtgsldzBS6g 1 1 3 0 16.8kb 16.8kb -yellow open corpuserindex_v2_1643510672335 0rIe_uIQTjme5Wy61MFbaw 1 1 6 2 32.4kb 32.4kb -yellow open datasetindex_v2_1643510688970 bjBfUEswSoSqPi3BP4iqjw 1 1 15 0 29.2kb 29.2kb -yellow open dataflowindex_v2_1643510681607 N8CMlRFvQ42rnYMVDaQJ2g 1 1 1 0 10.2kb 10.2kb -yellow open dataset_datasetusagestatisticsaspect_v1_1643510694706 kdqvqMYLRWq1oZt1pcAsXQ 1 1 4 0 8.9kb 8.9kb -yellow open .ds-datahub_usage_event-000003 YMVcU8sHTFilUwyI4CWJJg 1 1 186 0 203.9kb 203.9kb -yellow open datajob_datahubingestioncheckpointaspect_v1 nTXJf7C1Q3GoaIJ71gONxw 1 1 0 0 208b 208b -yellow open .ds-datahub_usage_event-000004 XRFwisRPSJuSr6UVmmsCsg 1 1 196 0 165.5kb 165.5kb -yellow open .ds-datahub_usage_event-000005 d0O6l5wIRLOyG6iIfAISGw 1 1 77 0 108.1kb 108.1kb -yellow open dataplatformindex_v2_1643510671426 _4SIIhfAT8yq_WROufunXA 1 1 0 0 208b 208b -yellow open mlmodeldeploymentindex_v2_1643510670629 n81eJIypSp2Qx-fpjZHgRw 1 1 0 0 208b 208b -yellow open .ds-datahub_usage_event-000006 oyrWKndjQ-a8Rt1IMD9aSA 1 1 143 0 127.1kb 127.1kb -yellow open mlfeaturetableindex_v2_1643510677164 iEXPt637S1OcilXpxPNYHw 1 1 5 0 8.9kb 8.9kb -yellow open .ds-datahub_usage_event-000001 S9EnGj64TEW8O3sLUb9I2Q 1 1 257 0 163.9kb 163.9kb -yellow open .ds-datahub_usage_event-000002 2xJyvKG_RYGwJOG9yq8pJw 1 1 44 0 155.4kb 155.4kb -yellow open dataset_datasetprofileaspect_v1_1643510693373 uahwTHGRRAC7w1c2VqVy8g 1 1 31 0 18.9kb 18.9kb -yellow open mlprimarykeyindex_v2_1643510687579 MUcmT8ASSASzEpLL98vrWg 1 1 7 0 9.5kb 9.5kb -yellow open 
glossarytermindex_v2_1643510686127 cQL8Pg6uQeKfMly9GPhgFQ 1 1 3 0 10kb 10kb -yellow open datajob_datahubingestionrunsummaryaspect_v1 rk22mIsDQ02-52MpWLm1DA 1 1 0 0 208b 208b -yellow open mlmodelindex_v2_1643510675399 gk-WSTVjRZmkDU5ggeFSqg 1 1 1 0 10.3kb 10.3kb -yellow open dashboardindex_v2_1643510691686 PQjSaGhTRqWW6zYjcqXo6Q 1 1 1 0 8.7kb 8.7kb -yellow open datahubpolicyindex_v2_1643510671774 ZyTrYx3-Q1e-7dYq1kn5Gg 1 1 0 0 208b 208b -yellow open datajobindex_v2_1643510682977 K-rbEyjBS6ew5uOQQS4sPw 1 1 2 0 11.3kb 11.3kb -yellow open datahubretentionindex_v2 8XrQTPwRTX278mx1SrNwZA 1 1 0 0 208b 208b -yellow open glossarynodeindex_v2_1643510678826 Y3_bCz0YR2KPwCrrVngDdA 1 1 1 0 7.4kb 7.4kb -yellow open system_metadata_service_v1 36spEDbDTdKgVlSjE8t-Jw 1 1 387 8 63.2kb 63.2kb -yellow open schemafieldindex_v2_1643510684410 tZ1gC3haTReRLmpCxirVxQ 1 1 0 0 208b 208b -yellow open mlfeatureindex_v2_1643510680246 aQO5HF0mT62Znn-oIWBC8A 1 1 20 0 17.4kb 17.4kb -yellow open tagindex_v2_1643510684785 PfnUdCUORY2fnF3I3W7HwA 1 1 3 1 18.6kb 18.6kb -``` - -The index name will vary from instance to instance. Indexed information about Datasets can be found in: -`curl http://localhost:9200/datasetindex_v2_1643510688970/_search?pretty` - -Example information for a dataset: - -``` -{ - "_index" : "datasetindex_v2_1643510688970", - "_type" : "_doc", - "_id" : "urn%3Ali%3Adataset%3A%28urn%3Ali%3AdataPlatform%3Akafka%2CSampleKafkaDataset%2CPROD%29", - "_score" : 1.0, - "_source" : { - "urn" : "urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)", - "name" : "SampleKafkaDataset", - "browsePaths" : [ - "/prod/kafka/SampleKafkaDataset" - ], - "origin" : "PROD", - "customProperties" : [ - "prop2=pikachu", - "prop1=fakeprop" - ], - "hasDescription" : false, - "hasOwners" : true, - "owners" : [ - "urn:li:corpuser:jdoe", - "urn:li:corpuser:datahub" - ], - "fieldPaths" : [ - "[version=2.0].[type=boolean].field_foo_2", - "[version=2.0].[type=boolean].field_bar", - "[version=2.0].[key=True].[type=int].id" - ], - "fieldGlossaryTerms" : [ ], - "fieldDescriptions" : [ - "Foo field description", - "Bar field description", - "Id specifying which partition the message should go to" - ], - "fieldTags" : [ - "urn:li:tag:NeedsDocumentation" - ], - "platform" : "urn:li:dataPlatform:kafka" - } - }, -``` - - - -*Need more help? 
Join the conversation in [Slack](http://slack.datahubproject.io)!* - -### Related Features - -* [Metadata ingestion framework](../../metadata-ingestion/README.md) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/__init__.py deleted file mode 100644 index ab1e88bc686d5c2fe72b3114cb2b3e372e73a0f8..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/mask/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .mask_target import mask_target -from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks -from .utils import encode_mask_results, split_combined_polys - -__all__ = [ - 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', - 'PolygonMasks', 'encode_mask_results' -] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/datasets/coco.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/datasets/coco.py deleted file mode 100644 index 3a8e1bcfdd7f2854ca381d4f87788e3a63eb568c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/datasets/coco.py +++ /dev/null @@ -1,546 +0,0 @@ -import itertools -import logging -import os.path as osp -import tempfile -from collections import OrderedDict - -import mmcv -import numpy as np -import pycocotools -from mmcv.utils import print_log -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -from terminaltables import AsciiTable - -from mmdet.core import eval_recalls -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CocoDataset(CustomDataset): - - CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - - def load_annotations(self, ann_file): - """Load annotation from COCO style annotation file. - - Args: - ann_file (str): Path of annotation file. - - Returns: - list[dict]: Annotation info from COCO api. - """ - if not getattr(pycocotools, '__version__', '0') >= '12.0.2': - raise AssertionError( - 'Incompatible version of pycocotools is installed. ' - 'Run pip uninstall pycocotools first. 
Then run pip ' - 'install mmpycocotools to install open-mmlab forked ' - 'pycocotools.') - - self.coco = COCO(ann_file) - self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.img_ids = self.coco.get_img_ids() - data_infos = [] - total_ann_ids = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - info['filename'] = info['file_name'] - data_infos.append(info) - ann_ids = self.coco.get_ann_ids(img_ids=[i]) - total_ann_ids.extend(ann_ids) - assert len(set(total_ann_ids)) == len( - total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" - return data_infos - - def get_ann_info(self, idx): - """Get COCO annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return self._parse_ann_info(self.data_infos[idx], ann_info) - - def get_cat_ids(self, idx): - """Get COCO category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return [ann['category_id'] for ann in ann_info] - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - valid_inds = [] - # obtain images that contain annotation - ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) - # obtain images that contain annotations of the required categories - ids_in_cat = set() - for i, class_id in enumerate(self.cat_ids): - ids_in_cat |= set(self.coco.cat_img_map[class_id]) - # merge the image id sets of the two conditions and use the merged set - # to filter out images if self.filter_empty_gt=True - ids_in_cat &= ids_with_ann - - valid_img_ids = [] - for i, img_info in enumerate(self.data_infos): - img_id = self.img_ids[i] - if self.filter_empty_gt and img_id not in ids_in_cat: - continue - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - valid_img_ids.append(img_id) - self.img_ids = valid_img_ids - return valid_inds - - def _parse_ann_info(self, img_info, ann_info): - """Parse bbox and mask annotation. - - Args: - ann_info (list[dict]): Annotation info of an image. - with_mask (bool): Whether to parse mask annotations. - - Returns: - dict: A dict containing the following keys: bboxes, bboxes_ignore,\ - labels, masks, seg_map. "masks" are raw annotations and not \ - decoded into binary masks. 
- """ - gt_bboxes = [] - gt_labels = [] - gt_bboxes_ignore = [] - gt_masks_ann = [] - for i, ann in enumerate(ann_info): - if ann.get('ignore', False): - continue - x1, y1, w, h = ann['bbox'] - inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) - inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) - if inter_w * inter_h == 0: - continue - if ann['area'] <= 0 or w < 1 or h < 1: - continue - if ann['category_id'] not in self.cat_ids: - continue - bbox = [x1, y1, x1 + w, y1 + h] - if ann.get('iscrowd', False): - gt_bboxes_ignore.append(bbox) - else: - gt_bboxes.append(bbox) - gt_labels.append(self.cat2label[ann['category_id']]) - gt_masks_ann.append(ann.get('segmentation', None)) - - if gt_bboxes: - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - else: - gt_bboxes = np.zeros((0, 4), dtype=np.float32) - gt_labels = np.array([], dtype=np.int64) - - if gt_bboxes_ignore: - gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) - else: - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - - seg_map = img_info['filename'].replace('jpg', 'png') - - ann = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - masks=gt_masks_ann, - seg_map=seg_map) - - return ann - - def xyxy2xywh(self, bbox): - """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO - evaluation. - - Args: - bbox (numpy.ndarray): The bounding boxes, shape (4, ), in - ``xyxy`` order. - - Returns: - list[float]: The converted bounding boxes, in ``xywh`` order. - """ - - _bbox = bbox.tolist() - return [ - _bbox[0], - _bbox[1], - _bbox[2] - _bbox[0], - _bbox[3] - _bbox[1], - ] - - def _proposal2json(self, results): - """Convert proposal results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - bboxes = results[idx] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = 1 - json_results.append(data) - return json_results - - def _det2json(self, results): - """Convert detection results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - result = results[idx] - for label in range(len(result)): - bboxes = result[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - json_results.append(data) - return json_results - - def _segm2json(self, results): - """Convert instance segmentation results to COCO json style.""" - bbox_json_results = [] - segm_json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - det, seg = results[idx] - for label in range(len(det)): - # bbox results - bboxes = det[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - bbox_json_results.append(data) - - # segm results - # some detectors use different scores for bbox and mask - if isinstance(seg, tuple): - segms = seg[0][label] - mask_score = seg[1][label] - else: - segms = seg[label] - mask_score = [bbox[4] for bbox in bboxes] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = 
float(mask_score[i]) - data['category_id'] = self.cat_ids[label] - if isinstance(segms[i]['counts'], bytes): - segms[i]['counts'] = segms[i]['counts'].decode() - data['segmentation'] = segms[i] - segm_json_results.append(data) - return bbox_json_results, segm_json_results - - def results2json(self, results, outfile_prefix): - """Dump the detection results to a COCO style json file. - - There are 3 types of results: proposals, bbox predictions, mask - predictions, and they have different data types. This method will - automatically recognize the type, and dump them to json files. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.bbox.json", "somepath/xxx.segm.json", - "somepath/xxx.proposal.json". - - Returns: - dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ - values are corresponding filenames. - """ - result_files = dict() - if isinstance(results[0], list): - json_results = self._det2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - mmcv.dump(json_results, result_files['bbox']) - elif isinstance(results[0], tuple): - json_results = self._segm2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - result_files['segm'] = f'{outfile_prefix}.segm.json' - mmcv.dump(json_results[0], result_files['bbox']) - mmcv.dump(json_results[1], result_files['segm']) - elif isinstance(results[0], np.ndarray): - json_results = self._proposal2json(results) - result_files['proposal'] = f'{outfile_prefix}.proposal.json' - mmcv.dump(json_results, result_files['proposal']) - else: - raise TypeError('invalid type of results') - return result_files - - def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): - gt_bboxes = [] - for i in range(len(self.img_ids)): - ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) - ann_info = self.coco.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - if ann.get('ignore', False) or ann['iscrowd']: - continue - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - def format_results(self, results, jsonfile_prefix=None, **kwargs): - """Format the results to json (standard format for COCO evaluation). - - Args: - results (list[tuple | numpy.ndarray]): Testing results of the - dataset. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - - Returns: - tuple: (result_files, tmp_dir), result_files is a dict containing \ - the json filepaths, tmp_dir is the temporal directory created \ - for saving json files when jsonfile_prefix is not specified. - """ - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. 
- format(len(results), len(self))) - - if jsonfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - jsonfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2json(results, jsonfile_prefix) - return result_files, tmp_dir - - def evaluate(self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=None, - metric_items=None): - """Evaluation in COCO protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - classwise (bool): Whether to evaluating the AP for each class. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float], optional): IoU threshold used for - evaluating recalls/mAPs. If set to a list, the average of all - IoUs will also be computed. If not specified, [0.50, 0.55, - 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. - Default: None. - metric_items (list[str] | str, optional): Metric items that will - be returned. If not specified, ``['AR@100', 'AR@300', - 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be - used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', - 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when - ``metric=='bbox' or metric=='segm'``. - - Returns: - dict[str, float]: COCO style evaluation metric. - """ - - metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - if metric_items is not None: - if not isinstance(metric_items, list): - metric_items = [metric_items] - - result_files, tmp_dir = self.format_results(results, jsonfile_prefix) - - eval_results = OrderedDict() - cocoGt = self.coco - for metric in metrics: - msg = f'Evaluating {metric}...' 
- if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - results, proposal_nums, iou_thrs, logger='silent') - log_msg = [] - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - print_log(log_msg, logger=logger) - continue - - if metric not in result_files: - raise KeyError(f'{metric} is not in results') - try: - cocoDt = cocoGt.loadRes(result_files[metric]) - except IndexError: - print_log( - 'The testing results of the whole dataset is empty.', - logger=logger, - level=logging.ERROR) - break - - iou_type = 'bbox' if metric == 'proposal' else metric - cocoEval = COCOeval(cocoGt, cocoDt, iou_type) - cocoEval.params.catIds = self.cat_ids - cocoEval.params.imgIds = self.img_ids - cocoEval.params.maxDets = list(proposal_nums) - cocoEval.params.iouThrs = iou_thrs - # mapping of cocoEval.stats - coco_metric_names = { - 'mAP': 0, - 'mAP_50': 1, - 'mAP_75': 2, - 'mAP_s': 3, - 'mAP_m': 4, - 'mAP_l': 5, - 'AR@100': 6, - 'AR@300': 7, - 'AR@1000': 8, - 'AR_s@1000': 9, - 'AR_m@1000': 10, - 'AR_l@1000': 11 - } - if metric_items is not None: - for metric_item in metric_items: - if metric_item not in coco_metric_names: - raise KeyError( - f'metric item {metric_item} is not supported') - - if metric == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if metric_items is None: - metric_items = [ - 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', - 'AR_m@1000', 'AR_l@1000' - ] - - for item in metric_items: - val = float( - f'{cocoEval.stats[coco_metric_names[item]]:.3f}') - eval_results[item] = val - else: - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = cocoEval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - nm = self.coco.loadCats(catId)[0] - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - if metric_items is None: - metric_items = [ - 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' - ] - - for metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = float( - f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' - ) - eval_results[key] = val - ap = cocoEval.stats[:6] - eval_results[f'{metric}_mAP_copypaste'] = ( - f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' - f'{ap[4]:.3f} {ap[5]:.3f}') - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git 
a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py deleted file mode 100644 index 2f8a0be133b313b03b1dbadb5c59f29cdcaffa22..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py +++ /dev/null @@ -1,89 +0,0 @@ -import annotator.uniformer.mmcv as mmcv -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class YOLOBBoxCoder(BaseBBoxCoder): - """YOLO BBox coder. - - Following `YOLO `_, this coder divide - image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). - cx, cy in [0., 1.], denotes relative center position w.r.t the center of - bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. - - Args: - eps (float): Min value of cx, cy when encoding. - """ - - def __init__(self, eps=1e-6): - super(BaseBBoxCoder, self).__init__() - self.eps = eps - - @mmcv.jit(coderize=True) - def encode(self, bboxes, gt_bboxes, stride): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): Source boxes, e.g., anchors. - gt_bboxes (torch.Tensor): Target of the transformation, e.g., - ground-truth boxes. - stride (torch.Tensor | int): Stride of bboxes. - - Returns: - torch.Tensor: Box transformation deltas - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 - y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 - w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] - h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] - x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 - y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 - w = bboxes[..., 2] - bboxes[..., 0] - h = bboxes[..., 3] - bboxes[..., 1] - w_target = torch.log((w_gt / w).clamp(min=self.eps)) - h_target = torch.log((h_gt / h).clamp(min=self.eps)) - x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - encoded_bboxes = torch.stack( - [x_center_target, y_center_target, w_target, h_target], dim=-1) - return encoded_bboxes - - @mmcv.jit(coderize=True) - def decode(self, bboxes, pred_bboxes, stride): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - boxes (torch.Tensor): Basic boxes, e.g. anchors. - pred_bboxes (torch.Tensor): Encoded boxes with shape - stride (torch.Tensor | int): Strides of bboxes. - - Returns: - torch.Tensor: Decoded boxes. 
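- - Example: with stride 32, a prediction of (0.5, 0.5, 0, 0) decodes back - to the anchor box itself, since the (pred - 0.5) * stride offsets are - zero and exp(0) == 1 keeps the anchor's width and height.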
- """ - assert pred_bboxes.size(0) == bboxes.size(0) - assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 - x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 - y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 - w = bboxes[..., 2] - bboxes[..., 0] - h = bboxes[..., 3] - bboxes[..., 1] - # Get outputs x, y - x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center - y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center - w_pred = torch.exp(pred_bboxes[..., 2]) * w - h_pred = torch.exp(pred_bboxes[..., 3]) * h - - decoded_bboxes = torch.stack( - (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2, - x_center_pred + w_pred / 2, y_center_pred + h_pred / 2), - dim=-1) - - return decoded_bboxes diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py deleted file mode 100644 index 38b6819f60587a6e0c0f6d57bfda32bb3a7a4267..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class InstaBoost(object): - r"""Data augmentation method in `InstaBoost: Boosting Instance - Segmentation Via Probability Map Guided Copy-Pasting - `_. - - Refer to https://github.com/GothicAi/Instaboost for implementation details. - """ - - def __init__(self, - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError( - 'Please run "pip install instaboostfast" ' - 'to install instaboostfast first for instaboost augmentation.') - self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, - scale, dx, dy, theta, - color_prob, hflag) - self.aug_ratio = aug_ratio - - def _load_anns(self, results): - labels = results['ann_info']['labels'] - masks = results['ann_info']['masks'] - bboxes = results['ann_info']['bboxes'] - n = len(labels) - - anns = [] - for i in range(n): - label = labels[i] - bbox = bboxes[i] - mask = masks[i] - x1, y1, x2, y2 = bbox - # assert (x2 - x1) >= 1 and (y2 - y1) >= 1 - bbox = [x1, y1, x2 - x1, y2 - y1] - anns.append({ - 'category_id': label, - 'segmentation': mask, - 'bbox': bbox - }) - - return anns - - def _parse_anns(self, results, anns, img): - gt_bboxes = [] - gt_labels = [] - gt_masks_ann = [] - for ann in anns: - x1, y1, w, h = ann['bbox'] - # TODO: more essential bug need to be fixed in instaboost - if w <= 0 or h <= 0: - continue - bbox = [x1, y1, x1 + w, y1 + h] - gt_bboxes.append(bbox) - gt_labels.append(ann['category_id']) - gt_masks_ann.append(ann['segmentation']) - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - results['ann_info']['labels'] = gt_labels - results['ann_info']['bboxes'] = gt_bboxes - results['ann_info']['masks'] = gt_masks_ann - results['img'] = img - return results - - def __call__(self, results): - img = results['img'] - orig_type = img.dtype - anns = self._load_anns(results) - if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError('Please run "pip install instaboostfast" ' - 'to install instaboostfast first.') - anns, img = 
instaboost.get_new_data( - anns, img.astype(np.uint8), self.cfg, background=None) - - results = self._parse_anns(results, anns, img.astype(orig_type)) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})' - return repr_str diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/exp/upernet_global_base/test_config_g.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/exp/upernet_global_base/test_config_g.py deleted file mode 100644 index 365549336f936ea2865640d1c467f1936be2157c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/exp/upernet_global_base/test_config_g.py +++ /dev/null @@ -1,49 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from UniFormer repo: From https://github.com/Sense-X/UniFormer - * Apache-2.0 license -''' -_base_ = [ - '../../configs/_base_/models/upernet_uniformer.py', - '../../configs/_base_/datasets/ade20k.py', - '../../configs/_base_/default_runtime.py', - '../../configs/_base_/schedules/schedule_160k.py' -] -model = dict( - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[5, 8, 20, 7], - head_dim=64, - drop_path_rate=0.4, - windows=False, - hybrid=False, - ), - decode_head=dict( - in_channels=[64, 128, 320, 512], - num_classes=150 - ), - auxiliary_head=dict( - in_channels=320, - num_classes=150 - )) - -# AdamW optimizer, no weight decay for position embedding & layer norm in backbone -optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/spaces/abouuuud/poetry/app.py b/spaces/abouuuud/poetry/app.py deleted file mode 100644 index 743e179975a957641a72c9206563bc53ca407c7b..0000000000000000000000000000000000000000 --- a/spaces/abouuuud/poetry/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import gc -import gradio as gr -from transformers import pipeline, set_seed - -pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023') -#gc.collect() -samples = [['أنت' - ,1.0, 50, 1.0, 1.0, 114],['هل غادر' - ,1.0, 50, 1.0, 1.0, 114 ],['ألا ليت' - ,1.0, 50, 1.0, 1.0, 114 ],['يا قدس' - ,1.0, 50, 1.0, 1.0, 114],['عيد بأية حال' - ,1.0, 50, 1.0, 1.0, 114],['لكل شيء إذا ما' - ,1.0, 50, 1.0, 1.0, 114 ],['.' - ,1.0, 50, 1.0, 1.0, 114]] - -notes = """ -- Enter a short prompt or select (click) one of the examples and click SEND -- Adjust parameters (temperture, top k, top p and penalty) through the slider (keep close to default values). -- For the same seed (randomness), the same output is regenerated if other parameters are fixed. Seed should be 0 or more (not empty) -- Clear and enter new prompt or select another example and SEND to regenerate -- The '.' 
means start a new line from no prompt (your prompt need not be long) -- Be patient: this runs on CPU (free tier) -- Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859) -- Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk. -""" -def sayPoetry(prompt, temp=1.0, topk = 50, topp = 1.0, penalty=1.0, seed=114): - if not int(seed) >= 0: seed=114 - set_seed(seed) - gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty, - min_length = 64, no_repeat_ngram_size = 3, return_full_text=True, - num_beams=5, num_return_sequences=1)[0]["generated_text"] - poetry ="" - for line in gen.split('.')[:-1]: - poetry += line #+ "\n" - return poetry -poetry = gr.Interface(fn=sayPoetry, - inputs=[ - gr.Textbox(label="Enter short prompt or select from examples:"), - gr.Slider(0.70, 1.2, step=0.01,value=1.0, label='control temperature'), - gr.Slider(25, 100, step=1,value=50, label='control top k'), - gr.Slider(0.80, 1.0, step=0.01,value=1.0, label='control top p'), - gr.Slider(0.90, 1.50, step=0.01,value=1.0, label='control penalty'), - gr.Number(value=139750, precision=0, label='Seed'), - ], - outputs=[gr.Textbox(label="Generated Poetry:")], - - allow_flagging='never', - title='Arabic Poetry Generation Demo (updated Jan. 2023)', - description = "A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)", - examples=samples, - cache_examples=False, - article = notes) -poetry.launch() \ No newline at end of file diff --git a/spaces/akhaliq/BlendGAN/ffhq_dataset/gen_aligned_image.py b/spaces/akhaliq/BlendGAN/ffhq_dataset/gen_aligned_image.py deleted file mode 100644 index 7a67b204e3b67304b1e5e4963fee387cbfa58096..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/BlendGAN/ffhq_dataset/gen_aligned_image.py +++ /dev/null @@ -1,24 +0,0 @@ -import os - -from .face_alignment import image_align -from .landmarks_detector import LandmarksDetector - - -cur_dir = os.path.split(os.path.realpath(__file__))[0] -model_path = os.path.join(cur_dir, 'shape_predictor_68_face_landmarks.dat') - - -class FaceAlign: - def __init__(self, predictor_model_path=model_path): - self.landmarks_detector = LandmarksDetector(predictor_model_path) - - def get_crop_image(self, image): - lms = [] - for i, face_landmarks in enumerate(self.landmarks_detector.get_landmarks(image), start=1): - lms.append(face_landmarks) - if len(lms) < 1: - return None - out_image = image_align(image, lms[0]) - - return out_image - diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/stdout.pl b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/stdout.pl deleted file mode 100644 index 1636406b18c8d4c61cd2f04f83d41a5c6b42c299..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/stdout.pl +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env perl -use warnings; #sed replacement for -w perl parameter - -# In general, doing -# run.pl some.log a b c is like running the command a b c in -# the bash shell, and putting the standard error and output into some.log. -# To run parallel jobs (backgrounded on the host machine), you can do (e.g.) -# run.pl JOB=1:4 some.JOB.log a b c JOB is like running the command a b c JOB -# and putting it in some.JOB.log, for each one. [Note: JOB can be any identifier]. -# If any of the jobs fails, this script will fail. 
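-# For instance, the --max-jobs-run option (parsed below) caps concurrency: -# run.pl --max-jobs-run 4 JOB=1:8 exp/some.JOB.log my-prog --job JOB -# is a hypothetical invocation that runs 8 jobs but keeps at most 4 of them -# running at any one time.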
- -# A typical example is: -# run.pl some.log my-prog "--opt=foo bar" foo \| other-prog baz -# and run.pl will run something like: -# ( my-prog '--opt=foo bar' foo | other-prog baz ) >& some.log -# -# Basically it takes the command-line arguments, quotes them -# as necessary to preserve spaces, and evaluates them with bash. -# In addition it puts the command line at the top of the log, and -# the start and end times of the command at the beginning and end. -# The reason why this is useful is so that we can create a different -# version of this program that uses a queueing system instead. - -# use Data::Dumper; - -@ARGV < 2 && die "usage: run.pl log-file command-line arguments..."; - - -$max_jobs_run = -1; -$jobstart = 1; -$jobend = 1; -$ignored_opts = ""; # These will be ignored. - -# First parse an option like JOB=1:4, and any -# options that would normally be given to -# queue.pl, which we will just discard. - -for (my $x = 1; $x <= 2; $x++) { # This for-loop is to - # allow the JOB=1:n option to be interleaved with the - # options to qsub. - while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) { - # parse any options that would normally go to qsub, but which will be ignored here. - my $switch = shift @ARGV; - if ($switch eq "-V") { - $ignored_opts .= "-V "; - } elsif ($switch eq "--max-jobs-run" || $switch eq "-tc") { - # we do support the option --max-jobs-run n, and its GridEngine form -tc n. - $max_jobs_run = shift @ARGV; - if (! ($max_jobs_run > 0)) { - die "run.pl: invalid option --max-jobs-run $max_jobs_run"; - } - } else { - my $argument = shift @ARGV; - if ($argument =~ m/^--/) { - print STDERR "run.pl: WARNING: suspicious argument '$argument' to $switch; starts with '-'\n"; - } - if ($switch eq "-sync" && $argument =~ m/^[yY]/) { - $ignored_opts .= "-sync "; # Note: in the - # corresponding code in queue.pl it says instead, just "$sync = 1;". - } elsif ($switch eq "-pe") { # e.g. -pe smp 5 - my $argument2 = shift @ARGV; - $ignored_opts .= "$switch $argument $argument2 "; - } elsif ($switch eq "--gpu") { - $using_gpu = $argument; - } else { - # Ignore option. - $ignored_opts .= "$switch $argument "; - } - } - } - if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:20 - $jobname = $1; - $jobstart = $2; - $jobend = $3; - shift; - if ($jobstart > $jobend) { - die "run.pl: invalid job range $ARGV[0]"; - } - if ($jobstart <= 0) { - die "run.pl: invalid job range $ARGV[0], start must be strictly positive (this is required for GridEngine compatibility)."; - } - } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1. - $jobname = $1; - $jobstart = $2; - $jobend = $2; - shift; - } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) { - print STDERR "run.pl: Warning: suspicious first argument to run.pl: $ARGV[0]\n"; - } -} - -# Users found this message confusing so we are removing it. -# if ($ignored_opts ne "") { -# print STDERR "run.pl: Warning: ignoring options \"$ignored_opts\"\n"; -# } - -if ($max_jobs_run == -1) { # If --max-jobs-run option not set, - # then work out the number of processors if possible, - # and set it based on that. - $max_jobs_run = 0; - if ($using_gpu) { - if (open(P, "nvidia-smi -L |")) { - $max_jobs_run++ while (
<P>); - close(P); - } - if ($max_jobs_run == 0) { - $max_jobs_run = 1; - print STDERR "run.pl: Warning: failed to detect number of GPUs from nvidia-smi, using ${max_jobs_run}\n"; - } - } elsif (open(P, "</proc/cpuinfo")) { # Linux - while (<P>) { if (m/^processor/) { $max_jobs_run++; } } - if ($max_jobs_run == 0) { - print STDERR "run.pl: Warning: failed to detect any processors from /proc/cpuinfo\n"; - $max_jobs_run = 10; # reasonable default. - } - close(P); - } elsif (open(P, "sysctl -a |")) { # BSD/Darwin - while (<P>
) { - if (m/hw\.ncpu\s*[:=]\s*(\d+)/) { # hw.ncpu = 4, or hw.ncpu: 4 - $max_jobs_run = $1; - last; - } - } - close(P); - if ($max_jobs_run == 0) { - print STDERR "run.pl: Warning: failed to detect any processors from sysctl -a\n"; - $max_jobs_run = 10; # reasonable default. - } - } else { - # allow at most 32 jobs at once, on non-UNIX systems; change this code - # if you need to change this default. - $max_jobs_run = 32; - } - # The just-computed value of $max_jobs_run is just the number of processors - # (or our best guess); and if it happens that the number of jobs we need to - # run is just slightly above $max_jobs_run, it will make sense to increase - # $max_jobs_run to equal the number of jobs, so we don't have a small number - # of leftover jobs. - $num_jobs = $jobend - $jobstart + 1; - if (!$using_gpu && - $num_jobs > $max_jobs_run && $num_jobs < 1.4 * $max_jobs_run) { - $max_jobs_run = $num_jobs; - } -} - -$logfile = shift @ARGV; - -if (defined $jobname && $logfile !~ m/$jobname/ && - $jobend > $jobstart) { - print STDERR "run.pl: you are trying to run a parallel job but " - . "you are putting the output into just one log file ($logfile)\n"; - exit(1); -} - -$cmd = ""; - -foreach $x (@ARGV) { - if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } - elsif ($x =~ m:\":) { $cmd .= "'$x' "; } - else { $cmd .= "\"$x\" "; } -} - -#$Data::Dumper::Indent=0; -$ret = 0; -$numfail = 0; -%active_pids=(); - -use POSIX ":sys_wait_h"; -for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) { - if (scalar(keys %active_pids) >= $max_jobs_run) { - - # Let's wait for a change in any child's status - # Then we have to work out which child finished - $r = waitpid(-1, 0); - $code = $?; - if ($r < 0 ) { die "run.pl: Error waiting for child process"; } # should never happen. - if ( defined $active_pids{$r} ) { - $jid=$active_pids{$r}; - $fail[$jid]=$code; - if ($code !=0) { $numfail++;} - delete $active_pids{$r}; - # print STDERR "Finished: $r/$jid " . Dumper(\%active_pids) . "\n"; - } else { - die "run.pl: Cannot find the PID of the child process that just finished."; - } - - # In theory we could do a non-blocking waitpid over all jobs running just - # to find out if only one or more jobs finished during the previous waitpid() - # However, we just omit this and will reap the next one in the next pass - # through the for(;;) cycle - } - $childpid = fork(); - if (!defined $childpid) { die "run.pl: Error forking in run.pl (writing to $logfile)"; } - if ($childpid == 0) { # We're in the child... this branch - # executes the job and returns (possibly with an error status). - if (defined $jobname) { - $cmd =~ s/$jobname/$jobid/g; - $logfile =~ s/$jobname/$jobid/g; - } - system("mkdir -p `dirname $logfile` 2>/dev/null"); - open(F, ">$logfile") || die "run.pl: Error opening log file $logfile"; - print F "# " . $cmd . "\n"; - print F "# Started at " . `date`; - $starttime = `date +'%s'`; - print F "#\n"; - close(F); - - # Pipe into bash.. make sure we're not using any other shell. - open(B, "|bash") || die "run.pl: Error opening shell command"; - print B "( " . $cmd . ") |& tee -a $logfile"; - close(B); # If there was an error, exit status is in $? - $ret = $?; - - $lowbits = $ret & 127; - $highbits = $ret >> 8; - if ($lowbits != 0) { $return_str = "code $highbits; signal $lowbits" } - else { $return_str = "code $highbits"; } - - $endtime = `date +'%s'`; - open(F, ">>$logfile") || die "run.pl: Error opening log file $logfile (again)"; - $enddate = `date`; - chop $enddate; - print F "# Accounting: time=" . 
($endtime - $starttime) . " threads=1\n"; - print F "# Ended ($return_str) at " . $enddate . ", elapsed time " . ($endtime-$starttime) . " seconds\n"; - close(F); - exit($ret == 0 ? 0 : 1); - } else { - $pid[$jobid] = $childpid; - $active_pids{$childpid} = $jobid; - # print STDERR "Queued: " . Dumper(\%active_pids) . "\n"; - } -} - -# Now we have submitted all the jobs, lets wait until all the jobs finish -foreach $child (keys %active_pids) { - $jobid=$active_pids{$child}; - $r = waitpid($pid[$jobid], 0); - $code = $?; - if ($r == -1) { die "run.pl: Error waiting for child process"; } # should never happen. - if ($r != 0) { $fail[$jobid]=$code; $numfail++ if $code!=0; } # Completed successfully -} - -# Some sanity checks: -# The $fail array should not contain undefined codes -# The number of non-zeros in that array should be equal to $numfail -# We cannot do foreach() here, as the JOB ids do not necessarily start by zero -$failed_jids=0; -for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) { - $job_return = $fail[$jobid]; - if (not defined $job_return ) { - # print Dumper(\@fail); - - die "run.pl: Sanity check failed: we have indication that some jobs are running " . - "even after we waited for all jobs to finish" ; - } - if ($job_return != 0 ){ $failed_jids++;} -} -if ($failed_jids != $numfail) { - die "run.pl: Sanity check failed: cannot find out how many jobs failed ($failed_jids x $numfail)." -} -if ($numfail > 0) { $ret = 1; } - -if ($ret != 0) { - $njobs = $jobend - $jobstart + 1; - if ($njobs == 1) { - if (defined $jobname) { - $logfile =~ s/$jobname/$jobstart/; # only one numbered job, so replace name with - # that job. - } - print STDERR "run.pl: job failed, log is in $logfile\n"; - if ($logfile =~ m/JOB/) { - print STDERR "run.pl: probably you forgot to put JOB=1:\$nj in your script."; - } - } - else { - $logfile =~ s/$jobname/*/g; - print STDERR "run.pl: $numfail / $njobs failed, log is in $logfile\n"; - } -} - - -exit ($ret); diff --git a/spaces/alamin655/Personas/conversant/prompts/__init__.py b/spaces/alamin655/Personas/conversant/prompts/__init__.py deleted file mode 100644 index 91bb77de25885e0c9319874b329891884d167eb2..0000000000000000000000000000000000000000 --- a/spaces/alamin655/Personas/conversant/prompts/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Cohere Inc. and its affiliates. -# -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. -# -# You may obtain a copy of the License in the LICENSE file at the top -# level of this repository. - -from conversant.prompts.chat_prompt import ChatPrompt -from conversant.prompts.prompt import Prompt -from conversant.prompts.rewrite_prompt import RewritePrompt - -__all__ = ["Prompt", "ChatPrompt", "RewritePrompt"] diff --git a/spaces/aldrinjenson/harry-potter-character-classifier/app.py b/spaces/aldrinjenson/harry-potter-character-classifier/app.py deleted file mode 100644 index 7f188c2251191a7da21f443d87a1b76ae2500924..0000000000000000000000000000000000000000 --- a/spaces/aldrinjenson/harry-potter-character-classifier/app.py +++ /dev/null @@ -1,25 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -title = "Harry Potter Character classifier" -description = "A Harry Potter Character classifier trained with over 3000 images from the internet!" -article = "

    <p>Created by Aldrin Jenson</p>

    " -examples = ['sample_images/daniel_radcliffe-000001.jpeg', 'sample_images/daniel_radcliffe-000002.jpeg', 'sample_images/daniel_radcliffe-000003.jpeg', 'sample_images/daniel_radcliffe-000202.jpeg', 'sample_images/daniel_radcliffe-000324.jpeg', 'sample_images/daniel_radcliffe-000381.jpeg', 'sample_images/dracoMalfoy-000002.jpeg', 'sample_images/dracoMalfoy-000003.jpeg', 'sample_images/ginnyweasly-000001.jpeg', - 'sample_images/ginnyweasly-000002.jpeg', 'sample_images/ginnyweasly-000003.jpeg', 'sample_images/ginnyweasly-000328.jpeg', 'sample_images/ginnyweasly-000365.jpeg', 'sample_images/ginnyweasly-000388.jpeg', 'sample_images/ginnyweasly-000404.jpeg', 'sample_images/groupPhoto-000423.jpeg', 'sample_images/lunaLovegood-000001.jpeg', 'sample_images/lunaLovegood-000002.jpeg', 'sample_images/lunaLovegood-000003.jpeg'] -interpretation = 'default' -enable_queue = True - - -learn = load_learner('export.pkl') -labels = learn.dls.vocab - - -def predict(img): - img = PILImage.create(img) - pred, pred_idx, probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - - -interface = gr.Interface(fn=predict, inputs=gr.components.Image(shape=(512, 512)), outputs=gr.components.Label( - num_top_classes=6), title=title, description=description, article=article, examples=examples, interpretation=interpretation) -interface.launch(share=False, enable_queue=enable_queue) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/status_codes.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/status_codes.py deleted file mode 100644 index 5e29502cddfa9a9887a93399ab4193fb75dfe605..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/status_codes.py +++ /dev/null @@ -1,6 +0,0 @@ -SUCCESS = 0 -ERROR = 1 -UNKNOWN_ERROR = 2 -VIRTUALENV_NOT_FOUND = 3 -PREVIOUS_BUILD_DIR_ERROR = 4 -NO_MATCHES_FOUND = 23 diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py deleted file mode 100644 index 5b13b1e9ba2c495ee4f00b8bc1f750625b7b6ff7..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/spinner.py +++ /dev/null @@ -1,134 +0,0 @@ -from typing import cast, List, Optional, TYPE_CHECKING - -from ._spinners import SPINNERS -from .measure import Measurement -from .table import Table -from .text import Text - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderResult, RenderableType - from .style import StyleType - - -class Spinner: - def __init__( - self, - name: str, - text: "RenderableType" = "", - *, - style: Optional["StyleType"] = None, - speed: float = 1.0, - ) -> None: - """A spinner animation. - - Args: - name (str): Name of spinner (run python -m rich.spinner). - text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "". - style (StyleType, optional): Style for spinner animation. Defaults to None. - speed (float, optional): Speed factor for animation. Defaults to 1.0. - - Raises: - KeyError: If name isn't one of the supported spinner animations. 
- """ - try: - spinner = SPINNERS[name] - except KeyError: - raise KeyError(f"no spinner called {name!r}") - self.text = Text.from_markup(text) if isinstance(text, str) else text - self.frames = cast(List[str], spinner["frames"])[:] - self.interval = cast(float, spinner["interval"]) - self.start_time: Optional[float] = None - self.style = style - self.speed = speed - self.frame_no_offset: float = 0.0 - self._update_speed = 0.0 - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - yield self.render(console.get_time()) - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - text = self.render(0) - return Measurement.get(console, options, text) - - def render(self, time: float) -> "RenderableType": - """Render the spinner for a given time. - - Args: - time (float): Time in seconds. - - Returns: - RenderableType: A renderable containing animation frame. - """ - if self.start_time is None: - self.start_time = time - - frame_no = ((time - self.start_time) * self.speed) / ( - self.interval / 1000.0 - ) + self.frame_no_offset - frame = Text( - self.frames[int(frame_no) % len(self.frames)], style=self.style or "" - ) - - if self._update_speed: - self.frame_no_offset = frame_no - self.start_time = time - self.speed = self._update_speed - self._update_speed = 0.0 - - if not self.text: - return frame - elif isinstance(self.text, (str, Text)): - return Text.assemble(frame, " ", self.text) - else: - table = Table.grid(padding=1) - table.add_row(frame, self.text) - return table - - def update( - self, - *, - text: "RenderableType" = "", - style: Optional["StyleType"] = None, - speed: Optional[float] = None, - ) -> None: - """Updates attributes of a spinner after it has been started. - - Args: - text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "". - style (StyleType, optional): Style for spinner animation. Defaults to None. - speed (float, optional): Speed factor for animation. Defaults to None. 
- """ - if text: - self.text = Text.from_markup(text) if isinstance(text, str) else text - if style: - self.style = style - if speed: - self._update_speed = speed - - -if __name__ == "__main__": # pragma: no cover - from time import sleep - - from .columns import Columns - from .panel import Panel - from .live import Live - - all_spinners = Columns( - [ - Spinner(spinner_name, text=Text(repr(spinner_name), style="green")) - for spinner_name in sorted(SPINNERS.keys()) - ], - column_first=True, - expand=True, - ) - - with Live( - Panel(all_spinners, title="Spinners", border_style="blue"), - refresh_per_second=20, - ) as live: - while True: - sleep(0.1) diff --git a/spaces/algomuffin/jojo_fork/e4e/models/encoders/model_irse.py b/spaces/algomuffin/jojo_fork/e4e/models/encoders/model_irse.py deleted file mode 100644 index 6a94d67542f961ff6533f0335cf4cb0fa54024fb..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/e4e/models/encoders/model_irse.py +++ /dev/null @@ -1,84 +0,0 @@ -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module -from e4e.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm - -""" -Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Backbone(Module): - def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): - super(Backbone, self).__init__() - assert input_size in [112, 224], "input_size should be 112 or 224" - assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" - assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - if input_size == 112: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 7 * 7, 512), - BatchNorm1d(512, affine=affine)) - else: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 14 * 14, 512), - BatchNorm1d(512, affine=affine)) - - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = self.output_layer(x) - return l2_norm(x) - - -def IR_50(input_size): - """Constructs a ir-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_101(input_size): - """Constructs a ir-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_152(input_size): - """Constructs a ir-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_50(input_size): - """Constructs a ir_se-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_101(input_size): - """Constructs a ir_se-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_152(input_size): - """Constructs a ir_se-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir_se', 
drop_ratio=0.4, affine=False) - return model diff --git a/spaces/aliabd/SummerTime/tests/integration_test.py b/spaces/aliabd/SummerTime/tests/integration_test.py deleted file mode 100644 index 7db778a36f613009b6166b7698c68e6c46a99305..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/tests/integration_test.py +++ /dev/null @@ -1,126 +0,0 @@ -import unittest - -from model.base_model import SummModel -from model import SUPPORTED_SUMM_MODELS - -from pipeline import assemble_model_pipeline - -from evaluation.base_metric import SummMetric -from evaluation import SUPPORTED_EVALUATION_METRICS - -from dataset.st_dataset import SummInstance, SummDataset -from dataset import SUPPORTED_SUMM_DATASETS -from dataset.dataset_loaders import ScisummnetDataset, ArxivDataset - -from helpers import print_with_color, retrieve_random_test_instances - -import random -import time -from typing import List, Union, Tuple -import sys -import re - - -class IntegrationTests(unittest.TestCase): - def get_prediction( - self, model: SummModel, dataset: SummDataset, test_instances: List[SummInstance] - ) -> Tuple[Union[List[str], List[List[str]]], Union[List[str], List[List[str]]]]: - """ - Get summary prediction given model and dataset instances. - - :param SummModel `model`: Model for summarization task. - :param SummDataset `dataset`: Dataset for summarization task. - :param List[SummInstance] `test_instances`: Instances from `dataset` to summarize. - :returns Tuple containing summary list of summary predictions and targets corresponding to each instance in `test_instances`. - """ - - src = ( - [ins.source[0] for ins in test_instances] - if isinstance(dataset, ScisummnetDataset) - else [ins.source for ins in test_instances] - ) - tgt = [ins.summary for ins in test_instances] - query = ( - [ins.query for ins in test_instances] if dataset.is_query_based else None - ) - prediction = model.summarize(src, query) - return prediction, tgt - - def get_eval_dict(self, metric: SummMetric, prediction: List[str], tgt: List[str]): - """ - Run evaluation metric on summary prediction. - - :param SummMetric `metric`: Evaluation metric. - :param List[str] `prediction`: Summary prediction instances. - :param List[str] `tgt`: Target prediction instances from dataset. - """ - score_dict = metric.evaluate(prediction, tgt) - return score_dict - - def test_all(self): - """ - Runs integration test on all compatible dataset + model + evaluation metric pipelines supported by SummerTime. - """ - - print_with_color("\nInitializing all evaluation metrics...", "35") - evaluation_metrics = [] - for eval_cls in SUPPORTED_EVALUATION_METRICS: - # # TODO: Temporarily skipping Rouge/RougeWE metrics to avoid local bug. 
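
A hedged usage sketch for the IR-SE factories that close the model_irse diff above, assuming the e4e helpers (get_blocks, bottleneck_IR_SE, l2_norm) are importable; a 112x112 crop batch yields L2-normalized 512-d embeddings:

import torch

model = IR_SE_50(input_size=112).eval()
with torch.no_grad():
    faces = torch.randn(4, 3, 112, 112)  # stand-in batch of RGB face crops
    embeddings = model(faces)            # shape (4, 512), unit-norm rows via l2_norm
print(embeddings.shape)
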
- # if eval_cls in [Rouge, RougeWe]: - # continue - print(eval_cls) - evaluation_metrics.append(eval_cls()) - - print_with_color("\n\nBeginning integration tests...", "35") - for dataset_cls in SUPPORTED_SUMM_DATASETS: - # TODO: Temporarily skipping Arxiv (size/time) - if dataset_cls in [ArxivDataset]: - continue - dataset = dataset_cls() - if dataset.train_set is not None: - dataset_instances = list(dataset.train_set) - print( - f"\n{dataset.dataset_name} has a training set of {len(dataset_instances)} examples" - ) - print_with_color( - f"Initializing all matching model pipelines for {dataset.dataset_name} dataset...", - "35", - ) - # matching_model_instances = assemble_model_pipeline(dataset_cls, list(filter(lambda m: m != PegasusModel, SUPPORTED_SUMM_MODELS))) - matching_model_instances = assemble_model_pipeline( - dataset_cls, SUPPORTED_SUMM_MODELS - ) - for model, model_name in matching_model_instances: - test_instances = retrieve_random_test_instances( - dataset_instances=dataset_instances, num_instances=1 - ) - print_with_color( - f"{'#' * 20} Testing: {dataset.dataset_name} dataset, {model_name} model {'#' * 20}", - "35", - ) - prediction, tgt = self.get_prediction( - model, dataset, test_instances - ) - print(f"Prediction: {prediction}\nTarget: {tgt}\n") - for metric in evaluation_metrics: - print_with_color(f"{metric.metric_name} metric", "35") - score_dict = self.get_eval_dict(metric, prediction, tgt) - print(score_dict) - - print_with_color( - f"{'#' * 20} Test for {dataset.dataset_name} dataset, {model_name} model COMPLETE {'#' * 20}\n\n", - "32", - ) - - -if __name__ == "__main__": - if len(sys.argv) > 2 or ( - len(sys.argv) == 2 and not re.match("^\\d+$", sys.argv[1]) - ): - print("Usage: python tests/integration_test.py [seed]", file=sys.stderr) - sys.exit(1) - - seed = int(time.time()) if len(sys.argv) == 1 else int(sys.argv.pop()) - random.seed(seed) - print_with_color(f"(to reproduce) random seeded with {seed}\n", "32") - unittest.main() diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/conversation.css b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/conversation.css deleted file mode 100644 index d20f178c45e8ccbfc9539f99914b25fc572045bd..0000000000000000000000000000000000000000 --- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/client/css/conversation.css +++ /dev/null @@ -1,158 +0,0 @@ -.conversation { - width: 60%; - margin: 0px 16px; - display: flex; - flex-direction: column; -} - -.conversation #messages { - width: 100%; - display: flex; - flex-direction: column; - overflow: auto; - overflow-wrap: break-word; - padding-bottom: 8px; -} - -.conversation .user-input { - max-height: 180px; - margin: 16px 0px; -} - -.conversation .user-input input { - font-size: 1rem; - background: none; - border: none; - outline: none; - color: var(--colour-3); -} - -.conversation .user-input input::placeholder { - color: var(--user-input); -} - -.conversation-title { - color: var(--colour-3); - font-size: 14px; -} - -.conversation .user-input textarea { - font-size: 1rem; - width: 100%; - height: 100%; - padding: 12px; - background: none; - border: none; - outline: none; - color: var(--colour-3); - resize: vertical; - max-height: 150px; - min-height: 80px; -} - -.box { - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - background-color: var(--blur-bg); - height: 100%; - width: 100%; - border-radius: var(--border-radius-1); - border: 1px solid var(--blur-border); -} - -.box.input-box { - position: relative; - align-items: center; - padding: 8px; - cursor: 
pointer; -} - -#send-button { - position: absolute; - bottom: 25%; - right: 10px; - z-index: 1; - padding: 16px; -} - -#cursor { - line-height: 17px; - margin-left: 3px; - -webkit-animation: blink 0.8s infinite; - animation: blink 0.8s infinite; - width: 7px; - height: 15px; -} - -@keyframes blink { - 0% { - background: #ffffff00; - } - - 50% { - background: white; - } - - 100% { - background: #ffffff00; - } -} - -@-webkit-keyframes blink { - 0% { - background: #ffffff00; - } - - 50% { - background: white; - } - - 100% { - background: #ffffff00; - } -} - -/* scrollbar */ -.conversation #messages::-webkit-scrollbar { - width: 4px; - padding: 8px 0px; -} - -.conversation #messages::-webkit-scrollbar-track { - background-color: #ffffff00; -} - -.conversation #messages::-webkit-scrollbar-thumb { - background-color: #555555; - border-radius: 10px; -} - -@media screen and (max-width: 990px) { - .conversation { - width: 100%; - height: 90%; - } -} - -@media screen and (max-height: 720px) { - .conversation.box { - height: 70%; - } - - .conversation .user-input textarea { - font-size: 0.875rem; - } -} - -@media screen and (max-width: 360px) { - .box { - border-radius: 0; - } - .conversation { - margin: 0; - margin-top: 48px; - } - .conversation .user-input { - margin: 2px 0 8px 0; - } -} diff --git a/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/cache_embedding_model.py b/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/cache_embedding_model.py deleted file mode 100644 index 44ac1dcd663d09a9f36bf9793ee2fa653339cbb3..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/cache_embedding_model.py +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env python3 -# preload the embedding model, useful for Docker images to prevent re-download on config change -# Dockerfile: -# ENV OPENEDAI_EMBEDDING_MODEL=all-mpnet-base-v2 # Optional -# RUN python3 cache_embedded_model.py -import os, sentence_transformers -st_model = os.environ["OPENEDAI_EMBEDDING_MODEL"] if "OPENEDAI_EMBEDDING_MODEL" in os.environ else "all-mpnet-base-v2" -model = sentence_transformers.SentenceTransformer(st_model) diff --git a/spaces/anurag629/botaniscan/app/models/__init__.py b/spaces/anurag629/botaniscan/app/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/save_images.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/save_images.py deleted file mode 100644 index 8b6c60c5bfec5947b0a9bf7f9fb87512e97e5ad6..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/save_images.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import List, Tuple -from einops import rearrange -import numpy as np, os, torch -from PIL import Image -from torchvision.utils import make_grid -import time - - -def get_output_folder(output_path, batch_folder): - out_path = os.path.join(output_path,time.strftime('%Y-%m')) - if batch_folder != "": - out_path = os.path.join(out_path, batch_folder) - os.makedirs(out_path, exist_ok=True) - return out_path - - -def save_samples( - args, x_samples: torch.Tensor, seed: int, n_rows: int -) -> Tuple[Image.Image, List[Image.Image]]: - """Function to save samples to disk. - Args: - args: Stable deforum diffusion arguments. - x_samples: Samples to save. 
- seed: Seed for the experiment. - n_rows: Number of rows in the grid. - Returns: - A tuple of the grid image and a list of the generated images. - ( grid_image, generated_images ) - """ - - # save samples - images = [] - grid_image = None - if args.display_samples or args.save_samples: - for index, x_sample in enumerate(x_samples): - x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c") - images.append(Image.fromarray(x_sample.astype(np.uint8))) - if args.save_samples: - images[-1].save( - os.path.join( - args.outdir, f"{args.timestring}_{index:02}_{seed}.png" - ) - ) - - # save grid - if args.display_grid or args.save_grid: - grid = torch.stack([x_samples], 0) - grid = rearrange(grid, "n b c h w -> (n b) c h w") - grid = make_grid(grid, nrow=n_rows, padding=0) - - # to image - grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy() - grid_image = Image.fromarray(grid.astype(np.uint8)) - if args.save_grid: - grid_image.save( - os.path.join(args.outdir, f"{args.timestring}_{seed}_grid.png") - ) - - # return grid_image and individual sample images - return grid_image, images - -def save_image(image, image_type, filename, args, video_args, root): - if video_args.store_frames_in_ram: - root.frames_cache.append({'path':os.path.join(args.outdir, filename), 'image':image, 'image_type':image_type}) - else: - image.save(os.path.join(args.outdir, filename)) - -import cv2, gc - -def reset_frames_cache(root): - root.frames_cache = [] - gc.collect() - -def dump_frames_cache(root): - for image_cache in root.frames_cache: - if image_cache['image_type'] == 'cv2': - cv2.imwrite(image_cache['path'], image_cache['image']) - elif image_cache['image_type'] == 'PIL': - image_cache['image'].save(image_cache['path']) - # do not reset the cache since we're going to add frame erasing later function #TODO diff --git a/spaces/arnavkartikeya/SCRIPture-final/data/__init__.py b/spaces/arnavkartikeya/SCRIPture-final/data/__init__.py deleted file mode 100644 index 0be209acf415855ea6ef753efedf903b5decb6b9..0000000000000000000000000000000000000000 --- a/spaces/arnavkartikeya/SCRIPture-final/data/__init__.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from torchvision import transforms -from torchvision.transforms.functional import InterpolationMode - -from data.coco_karpathy_dataset import coco_karpathy_train, coco_karpathy_caption_eval, coco_karpathy_retrieval_eval -from data.nocaps_dataset import nocaps_eval -from data.flickr30k_dataset import flickr30k_train, flickr30k_retrieval_eval -from data.vqa_dataset import vqa_dataset -from data.nlvr_dataset import nlvr_dataset -from data.pretrain_dataset import pretrain_dataset -from transform.randaugment import RandomAugment - -def create_dataset(dataset, config, min_scale=0.5): - - normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) - - transform_train = transforms.Compose([ - transforms.RandomResizedCrop(config['image_size'],scale=(min_scale, 1.0),interpolation=InterpolationMode.BICUBIC), - transforms.RandomHorizontalFlip(), - RandomAugment(2,5,isPIL=True,augs=['Identity','AutoContrast','Brightness','Sharpness','Equalize', - 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), - transforms.ToTensor(), - normalize, - ]) - transform_test = transforms.Compose([ - transforms.Resize((config['image_size'],config['image_size']),interpolation=InterpolationMode.BICUBIC), - transforms.ToTensor(), - normalize, - ]) - - if dataset=='pretrain': - dataset = 
pretrain_dataset(config['train_file'], config['laion_path'], transform_train) - return dataset - - elif dataset=='caption_coco': - train_dataset = coco_karpathy_train(transform_train, config['image_root'], config['ann_root'], prompt=config['prompt']) - val_dataset = coco_karpathy_caption_eval(transform_test, config['image_root'], config['ann_root'], 'val') - test_dataset = coco_karpathy_caption_eval(transform_test, config['image_root'], config['ann_root'], 'test') - return train_dataset, val_dataset, test_dataset - - elif dataset=='nocaps': - val_dataset = nocaps_eval(transform_test, config['image_root'], config['ann_root'], 'val') - test_dataset = nocaps_eval(transform_test, config['image_root'], config['ann_root'], 'test') - return val_dataset, test_dataset - - elif dataset=='retrieval_coco': - train_dataset = coco_karpathy_train(transform_train, config['image_root'], config['ann_root']) - val_dataset = coco_karpathy_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'val') - test_dataset = coco_karpathy_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'test') - return train_dataset, val_dataset, test_dataset - - elif dataset=='retrieval_flickr': - train_dataset = flickr30k_train(transform_train, config['image_root'], config['ann_root']) - val_dataset = flickr30k_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'val') - test_dataset = flickr30k_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'test') - return train_dataset, val_dataset, test_dataset - - elif dataset=='vqa': - train_dataset = vqa_dataset(transform_train, config['ann_root'], config['vqa_root'], config['vg_root'], - train_files = config['train_files'], split='train') - test_dataset = vqa_dataset(transform_test, config['ann_root'], config['vqa_root'], config['vg_root'], split='test') - return train_dataset, test_dataset - - elif dataset=='nlvr': - train_dataset = nlvr_dataset(transform_train, config['image_root'], config['ann_root'],'train') - val_dataset = nlvr_dataset(transform_test, config['image_root'], config['ann_root'],'val') - test_dataset = nlvr_dataset(transform_test, config['image_root'], config['ann_root'],'test') - return train_dataset, val_dataset, test_dataset - - -def create_sampler(datasets, shuffles, num_tasks, global_rank): - samplers = [] - for dataset,shuffle in zip(datasets,shuffles): - sampler = torch.utils.data.DistributedSampler(dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle) - samplers.append(sampler) - return samplers - - -def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns): - loaders = [] - for dataset,sampler,bs,n_worker,is_train,collate_fn in zip(datasets,samplers,batch_size,num_workers,is_trains,collate_fns): - if is_train: - shuffle = (sampler is None) - drop_last = True - else: - shuffle = False - drop_last = False - loader = DataLoader( - dataset, - batch_size=bs, - num_workers=n_worker, - pin_memory=True, - sampler=sampler, - shuffle=shuffle, - collate_fn=collate_fn, - drop_last=drop_last, - ) - loaders.append(loader) - return loaders - diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/punctuation.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/punctuation.py deleted file mode 100644 index 8d199cc545b36f6089056d904fd0a03670012c4d..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/punctuation.py +++ /dev/null @@ -1,172 +0,0 @@ -import collections 
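
The create_sampler/create_loader pair above encodes the usual DDP rule: once a DistributedSampler owns the ordering, the DataLoader must keep shuffle=False, which is why create_loader sets shuffle = (sampler is None). A small standalone sketch, with explicit num_replicas/rank standing in for values a distributed launcher would supply:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(100.0).unsqueeze(1))  # 100 toy samples
sampler = torch.utils.data.DistributedSampler(
    dataset, num_replicas=2, rank=0, shuffle=True)
loader = DataLoader(dataset, batch_size=8, sampler=sampler,
                    shuffle=False, drop_last=True)
print(len(loader))  # this rank sees 50 samples -> 6 full batches with drop_last
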
-import re -from enum import Enum - -import six - -_DEF_PUNCS = ';:,.!?¡¿—…"«»“”' - -_PUNC_IDX = collections.namedtuple("_punc_index", ["punc", "position"]) - - -class PuncPosition(Enum): - """Enum for the punctuations positions""" - - BEGIN = 0 - END = 1 - MIDDLE = 2 - ALONE = 3 - - -class Punctuation: - """Handle punctuations in text. - - Just strip punctuations from text or strip and restore them later. - - Args: - puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`. - - Example: - >>> punc = Punctuation() - >>> punc.strip("This is. example !") - 'This is example' - - >>> text_striped, punc_map = punc.strip_to_restore("This is. example !") - >>> ' '.join(text_striped) - 'This is example' - - >>> text_restored = punc.restore(text_striped, punc_map) - >>> text_restored[0] - 'This is. example !' - """ - - def __init__(self, puncs: str = _DEF_PUNCS): - self.puncs = puncs - - @staticmethod - def default_puncs(): - """Return default set of punctuations.""" - return _DEF_PUNCS - - @property - def puncs(self): - return self._puncs - - @puncs.setter - def puncs(self, value): - if not isinstance(value, six.string_types): - raise ValueError("[!] Punctuations must be of type str.") - self._puncs = "".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder - self.puncs_regular_exp = re.compile(rf"(\s*[{re.escape(self._puncs)}]+\s*)+") - - def strip(self, text): - """Remove all the punctuations by replacing with `space`. - - Args: - text (str): The text to be processed. - - Example:: - - "This is. example !" -> "This is example " - """ - return re.sub(self.puncs_regular_exp, " ", text).rstrip().lstrip() - - def strip_to_restore(self, text): - """Remove punctuations from text to restore them later. - - Args: - text (str): The text to be processed. - - Examples :: - - "This is. example !" -> [["This is", "example"], [".", "!"]] - - """ - text, puncs = self._strip_to_restore(text) - return text, puncs - - def _strip_to_restore(self, text): - """Auxiliary method for Punctuation.preserve()""" - matches = list(re.finditer(self.puncs_regular_exp, text)) - if not matches: - return [text], [] - # the text is only punctuations - if len(matches) == 1 and matches[0].group() == text: - return [], [_PUNC_IDX(text, PuncPosition.ALONE)] - # build a punctuation map to be used later to restore punctuations - puncs = [] - for match in matches: - position = PuncPosition.MIDDLE - if match == matches[0] and text.startswith(match.group()): - position = PuncPosition.BEGIN - elif match == matches[-1] and text.endswith(match.group()): - position = PuncPosition.END - puncs.append(_PUNC_IDX(match.group(), position)) - # convert str text to a List[str], each item is separated by a punctuation - splitted_text = [] - for idx, punc in enumerate(puncs): - split = text.split(punc.punc) - prefix, suffix = split[0], punc.punc.join(split[1:]) - splitted_text.append(prefix) - # if the text does not end with a punctuation, add it to the last item - if idx == len(puncs) - 1 and len(suffix) > 0: - splitted_text.append(suffix) - text = suffix - return splitted_text, puncs - - @classmethod - def restore(cls, text, puncs): - """Restore punctuation in a text. - - Args: - text (str): The text to be processed. - puncs (List[str]): The list of punctuations map to be used for restoring. - - Examples :: - - ['This is', 'example'], ['.', '!'] -> "This is. example!" 
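
A quick round-trip check for the strip_to_restore/restore pair documented above (the _restore helper follows below): the stripped parts plus the recorded marks rebuild the original string.

punc = Punctuation()
parts, marks = punc.strip_to_restore("Hi, there!")
# parts == ["Hi", "there"]; marks record (", ", MIDDLE) and ("!", END)
assert punc.restore(parts, marks)[0] == "Hi, there!"
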
- - """ - return cls._restore(text, puncs, 0) - - @classmethod - def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements - """Auxiliary method for Punctuation.restore()""" - if not puncs: - return text - - # nothing have been phonemized, returns the puncs alone - if not text: - return ["".join(m.punc for m in puncs)] - - current = puncs[0] - - if current.position == PuncPosition.BEGIN: - return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num) - - if current.position == PuncPosition.END: - return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1) - - if current.position == PuncPosition.ALONE: - return [current.mark] + cls._restore(text, puncs[1:], num + 1) - - # POSITION == MIDDLE - if len(text) == 1: # pragma: nocover - # a corner case where the final part of an intermediate - # mark (I) has not been phonemized - return cls._restore([text[0] + current.punc], puncs[1:], num) - - return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num) - - -# if __name__ == "__main__": -# punc = Punctuation() -# text = "This is. This is, example!" - -# print(punc.strip(text)) - -# split_text, puncs = punc.strip_to_restore(text) -# print(split_text, " ---- ", puncs) - -# restored_text = punc.restore(split_text, puncs) -# print(restored_text) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreeFragment.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreeFragment.py deleted file mode 100644 index 9ee8da5478cb5f2054ccb07a4306b6b35be0f310..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreeFragment.py +++ /dev/null @@ -1,64 +0,0 @@ -from Cython.TestUtils import CythonTest -from Cython.Compiler.TreeFragment import * -from Cython.Compiler.Nodes import * -from Cython.Compiler.UtilNodes import * -import Cython.Compiler.Naming as Naming - -class TestTreeFragments(CythonTest): - - def test_basic(self): - F = self.fragment(u"x = 4") - T = F.copy() - self.assertCode(u"x = 4", T) - - def test_copy_is_taken(self): - F = self.fragment(u"if True: x = 4") - T1 = F.root - T2 = F.copy() - self.assertEqual("x", T2.stats[0].if_clauses[0].body.lhs.name) - T2.stats[0].if_clauses[0].body.lhs.name = "other" - self.assertEqual("x", T1.stats[0].if_clauses[0].body.lhs.name) - - def test_substitutions_are_copied(self): - T = self.fragment(u"y + y").substitute({"y": NameNode(pos=None, name="x")}) - self.assertEqual("x", T.stats[0].expr.operand1.name) - self.assertEqual("x", T.stats[0].expr.operand2.name) - self.assertTrue(T.stats[0].expr.operand1 is not T.stats[0].expr.operand2) - - def test_substitution(self): - F = self.fragment(u"x = 4") - y = NameNode(pos=None, name=u"y") - T = F.substitute({"x" : y}) - self.assertCode(u"y = 4", T) - - def test_exprstat(self): - F = self.fragment(u"PASS") - pass_stat = PassStatNode(pos=None) - T = F.substitute({"PASS" : pass_stat}) - self.assertTrue(isinstance(T.stats[0], PassStatNode), T) - - def test_pos_is_transferred(self): - F = self.fragment(u""" - x = y - x = u * v ** w - """) - T = F.substitute({"v" : NameNode(pos=None, name="a")}) - v = F.root.stats[1].rhs.operand2.operand1 - a = T.stats[1].rhs.operand2.operand1 - self.assertEqual(v.pos, a.pos) - - def test_temps(self): - TemplateTransform.temp_name_counter = 0 - F = self.fragment(u""" - TMP - x = TMP - """) - T = F.substitute(temps=[u"TMP"]) - s = T.body.stats - 
self.assertTrue(isinstance(s[0].expr, TempRefNode)) - self.assertTrue(isinstance(s[1].rhs, TempRefNode)) - self.assertTrue(s[0].expr.handle is s[1].rhs.handle) - -if __name__ == "__main__": - import unittest - unittest.main() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/anscombe_plot.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/anscombe_plot.py deleted file mode 100644 index 9e200748533871f26f7e27c98a9fe82da1c0a47c..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/anscombe_plot.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Anscombe's Quartet ------------------- - -This example shows how to use the column channel to make a trellis plot. Anscombe's Quartet is a famous dataset constructed by Francis Anscombe. Common summary statistics are identical for each subset of the data, despite the subsets having vastly different characteristics. -""" -# category: case studies -import altair as alt -from vega_datasets import data - -source = data.anscombe() - -alt.Chart(source).mark_circle().encode( - alt.X('X', scale=alt.Scale(zero=False)), - alt.Y('Y', scale=alt.Scale(zero=False)), - alt.Facet('Series', columns=2), -).properties( - width=180, - height=180, -) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/__init__.py deleted file mode 100644 index 25408d28ec44cee56eb5fb3ab0c817dc04159e95..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .configs import FairseqDataclass -from .constants import ChoiceEnum - - -__all__ = [ - "FairseqDataclass", - "ChoiceEnum", -] diff --git a/spaces/awacke1/1-SimPhysics/README.md b/spaces/awacke1/1-SimPhysics/README.md deleted file mode 100644 index b31a6ae704f0373db4e2fb14898ade5c42afb191..0000000000000000000000000000000000000000 --- a/spaces/awacke1/1-SimPhysics/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: 🏖️PlayCanvas Simulation Vehicle Physics⛱️🌊 Live HTML5 -emoji: 1-Sim🌊 -colorFrom: green -colorTo: gray -sdk: static -pinned: false ---- - -Inspired by Danny Lange, VP AI and ML at Unity -Reference: https://youtu.be/YsEDv13W1RI?t=48 - -Quote on MLAgents: ... 
if you think about what I just said about evolution and that the creation of tools for intelligence yeah so you have the basic nature you have the 3d spatial environment you have gravity and you have inertia and the physics engine and now we throw in ml agents which is a machine learning system - diff --git a/spaces/awacke1/Data-Augmentation/README.md b/spaces/awacke1/Data-Augmentation/README.md deleted file mode 100644 index f6266d46578b1a850a6bd09a6ec049220a241ce3..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Data-Augmentation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Data Augmentation -emoji: 🏢 -colorFrom: pink -colorTo: pink -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/awacke1/Gamification-Grabble/README.md b/spaces/awacke1/Gamification-Grabble/README.md deleted file mode 100644 index ae064eb79d0ae27f9f56d31eb74014eb7ed533b7..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Gamification-Grabble/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gamification Grabble -emoji: 🔥 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/PerceiverEmotionClassifier/source/pipeline.py b/spaces/awacke1/PerceiverEmotionClassifier/source/pipeline.py deleted file mode 100644 index b1f42e4d9a2acf165418e146384fbe6240dd4f9f..0000000000000000000000000000000000000000 --- a/spaces/awacke1/PerceiverEmotionClassifier/source/pipeline.py +++ /dev/null @@ -1,127 +0,0 @@ -from typing import List - -import torch -from datasets import Dataset -from torch.utils.data import DataLoader -from tqdm import tqdm -from transformers import PerceiverTokenizer - - -def _map_outputs(predictions): - """ - Map model outputs to classes. - :param predictions: model ouptut batch - :return: - """ - - labels = [ - "admiration", - "amusement", - "anger", - "annoyance", - "approval", - "caring", - "confusion", - "curiosity", - "desire", - "disappointment", - "disapproval", - "disgust", - "embarrassment", - "excitement", - "fear", - "gratitude", - "grief", - "joy", - "love", - "nervousness", - "optimism", - "pride", - "realization", - "relief", - "remorse", - "sadness", - "surprise", - "neutral" - ] - classes = [] - for i, example in enumerate(predictions): - out_batch = [] - for j, category in enumerate(example): - out_batch.append(labels[j]) if category > 0.5 else None - classes.append(out_batch) - return classes - - -class MultiLabelPipeline: - """ - Multi label classification pipeline. - """ - - def __init__(self, model_path): - """ - Init MLC pipeline. - :param model_path: model to use - """ - - # Init attributes - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - if self.device == 'cuda': - self.model = torch.load(model_path).eval().to(self.device) - else: - self.model = torch.load(model_path, map_location=torch.device('cpu')).eval().to(self.device) - self.tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver') - - def __call__(self, dataset, batch_size: int = 4): - """ - Processing pipeline. 
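
_map_outputs above keeps every label whose score clears 0.5, so a row can yield zero, one, or several classes. The same thresholding in isolation, with toy labels and scores:

labels = ["anger", "joy", "neutral"]
predictions = [[0.9, 0.2, 0.7], [0.1, 0.4, 0.3]]

classes = [
    [labels[j] for j, score in enumerate(row) if score > 0.5]
    for row in predictions
]
assert classes == [["anger", "neutral"], []]  # the second row clears nothing
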
- :param dataset: dataset - :return: - """ - - # Tokenize inputs - dataset = dataset.map(lambda row: self.tokenizer(row['text'], padding="max_length", truncation=True), - batched=True, remove_columns=['text'], desc='Tokenizing') - dataset.set_format('torch', columns=['input_ids', 'attention_mask']) - dataloader = DataLoader(dataset, batch_size=batch_size) - - # Define output classes - classes = [] - mem_logs = [] - - with tqdm(dataloader, unit='batches') as progression: - for batch in progression: - progression.set_description('Inference') - # Forward - outputs = self.model(inputs=batch['input_ids'].to(self.device), - attention_mask=batch['attention_mask'].to(self.device), ) - - # Outputs - predictions = outputs.logits.cpu().detach().numpy() - - # Map predictions to classes - batch_classes = _map_outputs(predictions) - - for row in batch_classes: - classes.append(row) - - # Retrieve memory usage - memory = round(torch.cuda.memory_reserved(self.device) / 1e9, 2) - mem_logs.append(memory) - - # Update pbar - progression.set_postfix(memory=f"{round(sum(mem_logs) / len(mem_logs), 2)}Go") - - return classes - - -def inputs_to_dataset(inputs: List[str]): - """ - Convert a list of strings to a dataset object. - :param inputs: list of strings - :return: - """ - - inputs = {'text': [input for input in inputs]} - - return Dataset.from_dict(inputs) diff --git a/spaces/awacke1/PyGame2D/README.md b/spaces/awacke1/PyGame2D/README.md deleted file mode 100644 index 3f849f64cc0e42472386168e82ecdca7a83dde9a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/PyGame2D/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PyGame2D -emoji: ⚡ -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/QRCodeAIWriterReaderImaging/README.md b/spaces/awacke1/QRCodeAIWriterReaderImaging/README.md deleted file mode 100644 index 8965615b8c76f399bd6a0254fd52b4b236094713..0000000000000000000000000000000000000000 --- a/spaces/awacke1/QRCodeAIWriterReaderImaging/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QRCodeAIWriterReaderImaging -emoji: 🏆 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awaiss/vits-models/utils.py b/spaces/awaiss/vits-models/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/awaiss/vits-models/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in 
state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = 
json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/awinml/2-qa-earnings-sentencewise/utils/nltkmodules.py b/spaces/awinml/2-qa-earnings-sentencewise/utils/nltkmodules.py deleted file mode 100644 index c081a95438e01834212f548d955ed04e06d5478d..0000000000000000000000000000000000000000 --- a/spaces/awinml/2-qa-earnings-sentencewise/utils/nltkmodules.py +++ /dev/null @@ -1,5 +0,0 @@ -import nltk - -nltk.download("wordnet") -nltk.download("punkt") -nltk.download("stopwords") diff --git a/spaces/ayaderaghul/photo2monet/app.py b/spaces/ayaderaghul/photo2monet/app.py deleted file mode 100644 index 393fce9bf7e6d0cbabb57aa7a99c5a89627f8a58..0000000000000000000000000000000000000000 --- a/spaces/ayaderaghul/photo2monet/app.py +++ /dev/null @@ -1,70 +0,0 @@ -import gradio as gr -import keras -from keras.models import load_model -from tensorflow_addons.layers import InstanceNormalization -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -cust = {'InstanceNormalization': InstanceNormalization} -model=load_model('g-cycleGAN-photo2monet-500images-epoch10_30_30_30_30_30_1000images_30_30_30.h5',cust) -path = [['ex1.jpg'], ['ex2.jpg'], ['ex4.jpg'],['ex6.jpg'],['ex7.jpg'],['ex8.jpg'],['ex9.jpg'],['ex10.jpg'],['ex12.jpg'],['ex13.jpg']] - -# preprocess -AUTOTUNE = tf.data.AUTOTUNE -BUFFER_SIZE = 400 -BATCH_SIZE = 1 -IMG_WIDTH = 256 -IMG_HEIGHT = 256 - -def resize(image,height,width): - resized_image = tf.image.resize(image,[height,width],method = tf.image.ResizeMethod.NEAREST_NEIGHBOR) - return resized_image - -def normalize(input_image): - input_image = (input_image/127.5) - 1 - return input_image - -def load(img_file): - img = tf.io.read_file(img_file) - img = tf.io.decode_jpeg(img) - real_image = tf.cast(img,tf.float32) - - return real_image - -def load_image_test(image_file): - re = 
load(image_file) - re = resize(re,IMG_HEIGHT,IMG_WIDTH) - re = normalize(re) - return re - - -def show_preds_image(image_path): - A = load_image_test(image_path) - A = np.expand_dims(A,axis=0) - B = model(A) - B = B[0] - B = B * 0.5 + 0.5 - B = B.numpy() - return B - -inputs_image = [ - gr.components.Image(shape=(256,256),type="filepath", label="Input Image"), -] -outputs_image = [ - gr.components.Image(shape=(256,256),type="numpy", label="Output Image").style(width=256, height=256), -] -interface_image = gr.Interface( - fn=show_preds_image, - inputs=inputs_image, - outputs=outputs_image, - title="photo2monet", - examples=path, - cache_examples=False, -) - -gr.TabbedInterface( - [interface_image], - tab_names=['Image inference'] -).queue().launch() - diff --git a/spaces/badongtakla/ithaca/ithaca/models/__init__.py b/spaces/badongtakla/ithaca/ithaca/models/__init__.py deleted file mode 100644 index 9b2a3f45a31d6bfc90a023c27ccfccaabdf22ba4..0000000000000000000000000000000000000000 --- a/spaces/badongtakla/ithaca/ithaca/models/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2021 the Ithaca Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/CircleGeometry.js b/spaces/banana-projects/web3d/node_modules/three/src/geometries/CircleGeometry.js deleted file mode 100644 index 20904b9fbf8beb490fe370a9374669c2cb8145bf..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/CircleGeometry.js +++ /dev/null @@ -1,121 +0,0 @@ -/** - * @author benaadams / https://twitter.com/ben_a_adams - * @author Mugen87 / https://github.com/Mugen87 - * @author hughes - */ - -import { Geometry } from '../core/Geometry.js'; -import { BufferGeometry } from '../core/BufferGeometry.js'; -import { Float32BufferAttribute } from '../core/BufferAttribute.js'; -import { Vector3 } from '../math/Vector3.js'; -import { Vector2 } from '../math/Vector2.js'; - -// CircleGeometry - -function CircleGeometry( radius, segments, thetaStart, thetaLength ) { - - Geometry.call( this ); - - this.type = 'CircleGeometry'; - - this.parameters = { - radius: radius, - segments: segments, - thetaStart: thetaStart, - thetaLength: thetaLength - }; - - this.fromBufferGeometry( new CircleBufferGeometry( radius, segments, thetaStart, thetaLength ) ); - this.mergeVertices(); - -} - -CircleGeometry.prototype = Object.create( Geometry.prototype ); -CircleGeometry.prototype.constructor = CircleGeometry; - -// CircleBufferGeometry - -function CircleBufferGeometry( radius, segments, thetaStart, thetaLength ) { - - BufferGeometry.call( this ); - - this.type = 'CircleBufferGeometry'; - - this.parameters = { - radius: radius, - segments: segments, - thetaStart: thetaStart, - thetaLength: thetaLength - }; - - radius = radius || 1; - segments = segments !== undefined ? Math.max( 3, segments ) : 8; - - thetaStart = thetaStart !== undefined ? thetaStart : 0; - thetaLength = thetaLength !== undefined ? 
thetaLength : Math.PI * 2; - - // buffers - - var indices = []; - var vertices = []; - var normals = []; - var uvs = []; - - // helper variables - - var i, s; - var vertex = new Vector3(); - var uv = new Vector2(); - - // center point - - vertices.push( 0, 0, 0 ); - normals.push( 0, 0, 1 ); - uvs.push( 0.5, 0.5 ); - - for ( s = 0, i = 3; s <= segments; s ++, i += 3 ) { - - var segment = thetaStart + s / segments * thetaLength; - - // vertex - - vertex.x = radius * Math.cos( segment ); - vertex.y = radius * Math.sin( segment ); - - vertices.push( vertex.x, vertex.y, vertex.z ); - - // normal - - normals.push( 0, 0, 1 ); - - // uvs - - uv.x = ( vertices[ i ] / radius + 1 ) / 2; - uv.y = ( vertices[ i + 1 ] / radius + 1 ) / 2; - - uvs.push( uv.x, uv.y ); - - } - - // indices - - for ( i = 1; i <= segments; i ++ ) { - - indices.push( i, i + 1, 0 ); - - } - - // build geometry - - this.setIndex( indices ); - this.addAttribute( 'position', new Float32BufferAttribute( vertices, 3 ) ); - this.addAttribute( 'normal', new Float32BufferAttribute( normals, 3 ) ); - this.addAttribute( 'uv', new Float32BufferAttribute( uvs, 2 ) ); - -} - -CircleBufferGeometry.prototype = Object.create( BufferGeometry.prototype ); -CircleBufferGeometry.prototype.constructor = CircleBufferGeometry; - - -export { CircleGeometry, CircleBufferGeometry }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/TorusKnotGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/geometries/TorusKnotGeometry.d.ts deleted file mode 100644 index 56307a1052039ef277d092018cbdba6231b74602..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/TorusKnotGeometry.d.ts +++ /dev/null @@ -1,44 +0,0 @@ -import { Geometry } from './../core/Geometry'; -import { BufferGeometry } from './../core/BufferGeometry'; - -export class TorusKnotBufferGeometry extends BufferGeometry { - constructor( - radius?: number, - tube?: number, - tubularSegments?: number, - radialSegments?: number, - p?: number, - q?: number - ); - - parameters: { - radius: number; - tube: number; - tubularSegments: number; - radialSegments: number; - p: number; - q: number; - heightScale: number; - }; -} - -export class TorusKnotGeometry extends Geometry { - constructor( - radius?: number, - tube?: number, - tubularSegments?: number, - radialSegments?: number, - p?: number, - q?: number - ); - - parameters: { - radius: number; - tube: number; - tubularSegments: number; - radialSegments: number; - p: number; - q: number; - heightScale: number; - }; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/WireframeGeometry.js b/spaces/banana-projects/web3d/node_modules/three/src/geometries/WireframeGeometry.js deleted file mode 100644 index 521b4c69b3f300a6402d89a41371b9b6ddf14ea9..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/WireframeGeometry.js +++ /dev/null @@ -1,179 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - * @author Mugen87 / https://github.com/Mugen87 - */ - -import { BufferGeometry } from '../core/BufferGeometry.js'; -import { Float32BufferAttribute } from '../core/BufferAttribute.js'; -import { Vector3 } from '../math/Vector3.js'; - -function WireframeGeometry( geometry ) { - - BufferGeometry.call( this ); - - this.type = 'WireframeGeometry'; - - // buffer - - var vertices = []; - - // helper variables - - var i, j, l, o, ol; - var edge = [ 0, 0 ], edges = {}, e, edge1, edge2; - var key, 
keys = [ 'a', 'b', 'c' ]; - var vertex; - - // different logic for Geometry and BufferGeometry - - if ( geometry && geometry.isGeometry ) { - - // create a data structure that contains all edges without duplicates - - var faces = geometry.faces; - - for ( i = 0, l = faces.length; i < l; i ++ ) { - - var face = faces[ i ]; - - for ( j = 0; j < 3; j ++ ) { - - edge1 = face[ keys[ j ] ]; - edge2 = face[ keys[ ( j + 1 ) % 3 ] ]; - edge[ 0 ] = Math.min( edge1, edge2 ); // sorting prevents duplicates - edge[ 1 ] = Math.max( edge1, edge2 ); - - key = edge[ 0 ] + ',' + edge[ 1 ]; - - if ( edges[ key ] === undefined ) { - - edges[ key ] = { index1: edge[ 0 ], index2: edge[ 1 ] }; - - } - - } - - } - - // generate vertices - - for ( key in edges ) { - - e = edges[ key ]; - - vertex = geometry.vertices[ e.index1 ]; - vertices.push( vertex.x, vertex.y, vertex.z ); - - vertex = geometry.vertices[ e.index2 ]; - vertices.push( vertex.x, vertex.y, vertex.z ); - - } - - } else if ( geometry && geometry.isBufferGeometry ) { - - var position, indices, groups; - var group, start, count; - var index1, index2; - - vertex = new Vector3(); - - if ( geometry.index !== null ) { - - // indexed BufferGeometry - - position = geometry.attributes.position; - indices = geometry.index; - groups = geometry.groups; - - if ( groups.length === 0 ) { - - groups = [ { start: 0, count: indices.count, materialIndex: 0 } ]; - - } - - // create a data structure that contains all edges without duplicates - - for ( o = 0, ol = groups.length; o < ol; ++ o ) { - - group = groups[ o ]; - - start = group.start; - count = group.count; - - for ( i = start, l = ( start + count ); i < l; i += 3 ) { - - for ( j = 0; j < 3; j ++ ) { - - edge1 = indices.getX( i + j ); - edge2 = indices.getX( i + ( j + 1 ) % 3 ); - edge[ 0 ] = Math.min( edge1, edge2 ); // sorting prevents duplicates - edge[ 1 ] = Math.max( edge1, edge2 ); - - key = edge[ 0 ] + ',' + edge[ 1 ]; - - if ( edges[ key ] === undefined ) { - - edges[ key ] = { index1: edge[ 0 ], index2: edge[ 1 ] }; - - } - - } - - } - - } - - // generate vertices - - for ( key in edges ) { - - e = edges[ key ]; - - vertex.fromBufferAttribute( position, e.index1 ); - vertices.push( vertex.x, vertex.y, vertex.z ); - - vertex.fromBufferAttribute( position, e.index2 ); - vertices.push( vertex.x, vertex.y, vertex.z ); - - } - - } else { - - // non-indexed BufferGeometry - - position = geometry.attributes.position; - - for ( i = 0, l = ( position.count / 3 ); i < l; i ++ ) { - - for ( j = 0; j < 3; j ++ ) { - - // three edges per triangle, an edge is represented as (index1, index2) - // e.g.
the first triangle has the following edges: (0,1),(1,2),(2,0) - - index1 = 3 * i + j; - vertex.fromBufferAttribute( position, index1 ); - vertices.push( vertex.x, vertex.y, vertex.z ); - - index2 = 3 * i + ( ( j + 1 ) % 3 ); - vertex.fromBufferAttribute( position, index2 ); - vertices.push( vertex.x, vertex.y, vertex.z ); - - } - - } - - } - - } - - // build geometry - - this.addAttribute( 'position', new Float32BufferAttribute( vertices, 3 ) ); - -} - -WireframeGeometry.prototype = Object.create( BufferGeometry.prototype ); -WireframeGeometry.prototype.constructor = WireframeGeometry; - - -export { WireframeGeometry }; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327003506.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327003506.py deleted file mode 100644 index df673ec9965e60183b387f1a966ef6101adfb251..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327003506.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -#os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -import warnings -warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_img , restored_faces= restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - return Image.fromarray(restored_img[2][:,:,::-1]) - -title = "GFP-GAN" -description = "Gradio demo for GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

    Towards Real-World Blind Face Restoration with Generative Facial Prior | Github Repo
    visitor badge
    " -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True) \ No newline at end of file diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/niqe.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/niqe.py deleted file mode 100644 index e261da25f457151a86f1062da2e45dd7909ee0dd..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/niqe.py +++ /dev/null @@ -1,197 +0,0 @@ -import cv2 -import math -import numpy as np -import os -from scipy.ndimage.filters import convolve -from scipy.special import gamma - -from basicsr.metrics.metric_util import reorder_image, to_y_channel -from basicsr.utils.matlab_functions import imresize -from basicsr.utils.registry import METRIC_REGISTRY - - -def estimate_aggd_param(block): - """Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters. - - Args: - block (ndarray): 2D Image block. - - Returns: - tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD - distribution (Estimating the parames in Equation 7 in the paper). - """ - block = block.flatten() - gam = np.arange(0.2, 10.001, 0.001) # len = 9801 - gam_reciprocal = np.reciprocal(gam) - r_gam = np.square(gamma(gam_reciprocal * 2)) / (gamma(gam_reciprocal) * gamma(gam_reciprocal * 3)) - - left_std = np.sqrt(np.mean(block[block < 0]**2)) - right_std = np.sqrt(np.mean(block[block > 0]**2)) - gammahat = left_std / right_std - rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2) - rhatnorm = (rhat * (gammahat**3 + 1) * (gammahat + 1)) / ((gammahat**2 + 1)**2) - array_position = np.argmin((r_gam - rhatnorm)**2) - - alpha = gam[array_position] - beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha)) - beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha)) - return (alpha, beta_l, beta_r) - - -def compute_feature(block): - """Compute features. - - Args: - block (ndarray): 2D Image block. - - Returns: - list: Features with length of 18. - """ - feat = [] - alpha, beta_l, beta_r = estimate_aggd_param(block) - feat.extend([alpha, (beta_l + beta_r) / 2]) - - # distortions disturb the fairly regular structure of natural images. - # This deviation can be captured by analyzing the sample distribution of - # the products of pairs of adjacent coefficients computed along - # horizontal, vertical and diagonal orientations. - shifts = [[0, 1], [1, 0], [1, 1], [1, -1]] - for i in range(len(shifts)): - shifted_block = np.roll(block, shifts[i], axis=(0, 1)) - alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block) - # Eq. 8 - mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha)) - feat.extend([alpha, mean, beta_l, beta_r]) - return feat - - -def niqe(img, mu_pris_param, cov_pris_param, gaussian_window, block_size_h=96, block_size_w=96): - """Calculate NIQE (Natural Image Quality Evaluator) metric. - - Ref: Making a "Completely Blind" Image Quality Analyzer. - This implementation could produce almost the same results as the official - MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip - - Note that we do not include block overlap height and width, since they are - always 0 in the official implementation. 
- - For good performance, the official implementation advises dividing the - distorted image into patches of the same size as those used for the - construction of the multivariate Gaussian model. - - Args: - img (ndarray): Input image whose quality needs to be computed. The - image must be a gray or Y (of YCbCr) image with shape (h, w). - Range [0, 255] with float type. - mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian - model calculated on the pristine dataset. - cov_pris_param (ndarray): Covariance of a pre-defined multivariate - Gaussian model calculated on the pristine dataset. - gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the - image. - block_size_h (int): Height of the blocks into which the image is divided. - Default: 96 (the official recommended value). - block_size_w (int): Width of the blocks into which the image is divided. - Default: 96 (the official recommended value). - """ - assert img.ndim == 2, ('Input image must be a gray or Y (of YCbCr) image with shape (h, w).') - # crop image - h, w = img.shape - num_block_h = math.floor(h / block_size_h) - num_block_w = math.floor(w / block_size_w) - img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w] - - distparam = [] # distparam is actually the multiscale features - for scale in (1, 2): # perform on two scales (1, 2) - mu = convolve(img, gaussian_window, mode='nearest') - sigma = np.sqrt(np.abs(convolve(np.square(img), gaussian_window, mode='nearest') - np.square(mu))) - # normalize, as in Eq. 1 in the paper - img_normalized = (img - mu) / (sigma + 1) - - feat = [] - for idx_w in range(num_block_w): - for idx_h in range(num_block_h): - # process each block - block = img_normalized[idx_h * block_size_h // scale:(idx_h + 1) * block_size_h // scale, - idx_w * block_size_w // scale:(idx_w + 1) * block_size_w // scale] - feat.append(compute_feature(block)) - - distparam.append(np.array(feat)) - - if scale == 1: - img = imresize(img / 255., scale=0.5, antialiasing=True) - img = img * 255. - - distparam = np.concatenate(distparam, axis=1) - - # fit a MVG (multivariate Gaussian) model to distorted patch features - mu_distparam = np.nanmean(distparam, axis=0) - # use nancov. ref: https://ww2.mathworks.cn/help/stats/nancov.html - distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)] - cov_distparam = np.cov(distparam_no_nan, rowvar=False) - - # compute niqe quality, Eq. 10 in the paper - invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2) - quality = np.matmul( - np.matmul((mu_pris_param - mu_distparam), invcov_param), np.transpose((mu_pris_param - mu_distparam))) - - quality = np.sqrt(quality) - quality = float(np.squeeze(quality)) - return quality - - -@METRIC_REGISTRY.register() -def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y', **kwargs): - """Calculate NIQE (Natural Image Quality Evaluator) metric. - - Ref: Making a "Completely Blind" Image Quality Analyzer. - This implementation could produce almost the same results as the official - MATLAB code: http://live.ece.utexas.edu/research/quality/niqe_release.zip - - > MATLAB R2021a result for tests/data/baboon.png: 5.72957338 (5.7296) - > Our re-implementation result for tests/data/baboon.png: 5.7295763 (5.7296) - - We use the official params estimated from the pristine dataset. - We use the recommended block size (96, 96) without overlaps. - - Args: - img (ndarray): Input image whose quality needs to be computed. - The input image must be in range [0, 255] with float/int type.
- The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) - If the input order is 'HWC' or 'CHW', it will be converted to gray - or Y (of YCbCr) image according to the ``convert_to`` argument. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the metric calculation. - input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. - Default: 'HWC'. - convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'. - Default: 'y'. - - Returns: - float: NIQE result. - """ - ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - # we use the official params estimated from the pristine dataset. - niqe_pris_params = np.load(os.path.join(ROOT_DIR, 'niqe_pris_params.npz')) - mu_pris_param = niqe_pris_params['mu_pris_param'] - cov_pris_param = niqe_pris_params['cov_pris_param'] - gaussian_window = niqe_pris_params['gaussian_window'] - - img = img.astype(np.float32) - if input_order != 'HW': - img = reorder_image(img, input_order=input_order) - if convert_to == 'y': - img = to_y_channel(img) - elif convert_to == 'gray': - img = cv2.cvtColor(img / 255., cv2.COLOR_BGR2GRAY) * 255. - img = np.squeeze(img) - - if crop_border != 0: - img = img[crop_border:-crop_border, crop_border:-crop_border] - - # round is necessary for being consistent with MATLAB's result - img = img.round() - - niqe_result = niqe(img, mu_pris_param, cov_pris_param, gaussian_window) - - return niqe_result diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/experiments/pretrained_models/README.md b/spaces/beihai/GFPGAN-V1.3-whole-image/experiments/pretrained_models/README.md deleted file mode 100644 index 3401a5ca9b393e0033f58c5af8905961565826d9..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/experiments/pretrained_models/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Pre-trained Models and Other Data - -Download pre-trained models and other data. Put them in this folder. - -1. [Pretrained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth) -1. [Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth) -1. 
[A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth) diff --git a/spaces/bguberfain/Detic/detic/data/datasets/lvis_22k_categories.py b/spaces/bguberfain/Detic/detic/data/datasets/lvis_22k_categories.py deleted file mode 100644 index 9525f0873d68d84dd691979c32eaadd7860f59fe..0000000000000000000000000000000000000000 --- a/spaces/bguberfain/Detic/detic/data/datasets/lvis_22k_categories.py +++ /dev/null @@ -1 +0,0 @@ -CATEGORIES = [{'name': 'aerosol_can', 'id': 1, 'frequency': 'c', 'synset': 'aerosol.n.02'}, {'name': 'air_conditioner', 'id': 2, 'frequency': 'f', 'synset': 'air_conditioner.n.01'}, {'name': 'airplane', 'id': 3, 'frequency': 'f', 'synset': 'airplane.n.01'}, {'name': 'alarm_clock', 'id': 4, 'frequency': 'f', 'synset': 'alarm_clock.n.01'}, {'name': 'alcohol', 'id': 5, 'frequency': 'c', 'synset': 'alcohol.n.01'}, {'name': 'alligator', 'id': 6, 'frequency': 'c', 'synset': 'alligator.n.02'}, {'name': 'almond', 'id': 7, 'frequency': 'c', 'synset': 'almond.n.02'}, {'name': 'ambulance', 'id': 8, 'frequency': 'c', 'synset': 'ambulance.n.01'}, {'name': 'amplifier', 'id': 9, 'frequency': 'c', 'synset': 'amplifier.n.01'}, {'name': 'anklet', 'id': 10, 'frequency': 'c', 'synset': 'anklet.n.03'}, {'name': 'antenna', 'id': 11, 'frequency': 'f', 'synset': 'antenna.n.01'}, {'name': 'apple', 'id': 12, 'frequency': 'f', 'synset': 'apple.n.01'}, {'name': 'applesauce', 'id': 13, 'frequency': 'r', 'synset': 'applesauce.n.01'}, {'name': 'apricot', 'id': 14, 'frequency': 'r', 'synset': 'apricot.n.02'}, {'name': 'apron', 'id': 15, 'frequency': 'f', 'synset': 'apron.n.01'}, {'name': 'aquarium', 'id': 16, 'frequency': 'c', 'synset': 'aquarium.n.01'}, {'name': 'arctic_(type_of_shoe)', 'id': 17, 'frequency': 'r', 'synset': 'arctic.n.02'}, {'name': 'armband', 'id': 18, 'frequency': 'c', 'synset': 'armband.n.02'}, {'name': 'armchair', 'id': 19, 'frequency': 'f', 'synset': 'armchair.n.01'}, {'name': 'armoire', 'id': 20, 'frequency': 'r', 'synset': 'armoire.n.01'}, {'name': 'armor', 'id': 21, 'frequency': 'r', 'synset': 'armor.n.01'}, {'name': 'artichoke', 'id': 22, 'frequency': 'c', 'synset': 'artichoke.n.02'}, {'name': 'trash_can', 'id': 23, 'frequency': 'f', 'synset': 'ashcan.n.01'}, {'name': 'ashtray', 'id': 24, 'frequency': 'c', 'synset': 'ashtray.n.01'}, {'name': 'asparagus', 'id': 25, 'frequency': 'c', 'synset': 'asparagus.n.02'}, {'name': 'atomizer', 'id': 26, 'frequency': 'c', 'synset': 'atomizer.n.01'}, {'name': 'avocado', 'id': 27, 'frequency': 'f', 'synset': 'avocado.n.01'}, {'name': 'award', 'id': 28, 'frequency': 'c', 'synset': 'award.n.02'}, {'name': 'awning', 'id': 29, 'frequency': 'f', 'synset': 'awning.n.01'}, {'name': 'ax', 'id': 30, 'frequency': 'r', 'synset': 'ax.n.01'}, {'name': 'baboon', 'id': 31, 'frequency': 'r', 'synset': 'baboon.n.01'}, {'name': 'baby_buggy', 'id': 32, 'frequency': 'f', 'synset': 'baby_buggy.n.01'}, {'name': 'basketball_backboard', 'id': 33, 'frequency': 'c', 'synset': 'backboard.n.01'}, {'name': 'backpack', 'id': 34, 'frequency': 'f', 'synset': 'backpack.n.01'}, {'name': 'handbag', 'id': 35, 'frequency': 'f', 'synset': 'bag.n.04'}, {'name': 'suitcase', 'id': 36, 'frequency': 'f', 'synset': 'bag.n.06'}, {'name': 'bagel', 'id': 37, 'frequency': 'c', 'synset': 'bagel.n.01'}, {'name': 'bagpipe', 'id': 38, 'frequency': 'r', 'synset': 'bagpipe.n.01'}, {'name': 'baguet', 'id': 39, 'frequency': 'r', 'synset': 'baguet.n.01'}, {'name': 'bait', 'id': 40, 'frequency': 'r', 'synset': 'bait.n.02'}, 
{'name': 'ball', 'id': 41, 'frequency': 'f', 'synset': 'ball.n.06'}, {'name': 'ballet_skirt', 'id': 42, 'frequency': 'r', 'synset': 'ballet_skirt.n.01'}, {'name': 'balloon', 'id': 43, 'frequency': 'f', 'synset': 'balloon.n.01'}, {'name': 'bamboo', 'id': 44, 'frequency': 'c', 'synset': 'bamboo.n.02'}, {'name': 'banana', 'id': 45, 'frequency': 'f', 'synset': 'banana.n.02'}, {'name': 'Band_Aid', 'id': 46, 'frequency': 'c', 'synset': 'band_aid.n.01'}, {'name': 'bandage', 'id': 47, 'frequency': 'c', 'synset': 'bandage.n.01'}, {'name': 'bandanna', 'id': 48, 'frequency': 'f', 'synset': 'bandanna.n.01'}, {'name': 'banjo', 'id': 49, 'frequency': 'r', 'synset': 'banjo.n.01'}, {'name': 'banner', 'id': 50, 'frequency': 'f', 'synset': 'banner.n.01'}, {'name': 'barbell', 'id': 51, 'frequency': 'r', 'synset': 'barbell.n.01'}, {'name': 'barge', 'id': 52, 'frequency': 'r', 'synset': 'barge.n.01'}, {'name': 'barrel', 'id': 53, 'frequency': 'f', 'synset': 'barrel.n.02'}, {'name': 'barrette', 'id': 54, 'frequency': 'c', 'synset': 'barrette.n.01'}, {'name': 'barrow', 'id': 55, 'frequency': 'c', 'synset': 'barrow.n.03'}, {'name': 'baseball_base', 'id': 56, 'frequency': 'f', 'synset': 'base.n.03'}, {'name': 'baseball', 'id': 57, 'frequency': 'f', 'synset': 'baseball.n.02'}, {'name': 'baseball_bat', 'id': 58, 'frequency': 'f', 'synset': 'baseball_bat.n.01'}, {'name': 'baseball_cap', 'id': 59, 'frequency': 'f', 'synset': 'baseball_cap.n.01'}, {'name': 'baseball_glove', 'id': 60, 'frequency': 'f', 'synset': 'baseball_glove.n.01'}, {'name': 'basket', 'id': 61, 'frequency': 'f', 'synset': 'basket.n.01'}, {'name': 'basketball', 'id': 62, 'frequency': 'c', 'synset': 'basketball.n.02'}, {'name': 'bass_horn', 'id': 63, 'frequency': 'r', 'synset': 'bass_horn.n.01'}, {'name': 'bat_(animal)', 'id': 64, 'frequency': 'c', 'synset': 'bat.n.01'}, {'name': 'bath_mat', 'id': 65, 'frequency': 'f', 'synset': 'bath_mat.n.01'}, {'name': 'bath_towel', 'id': 66, 'frequency': 'f', 'synset': 'bath_towel.n.01'}, {'name': 'bathrobe', 'id': 67, 'frequency': 'c', 'synset': 'bathrobe.n.01'}, {'name': 'bathtub', 'id': 68, 'frequency': 'f', 'synset': 'bathtub.n.01'}, {'name': 'batter_(food)', 'id': 69, 'frequency': 'r', 'synset': 'batter.n.02'}, {'name': 'battery', 'id': 70, 'frequency': 'c', 'synset': 'battery.n.02'}, {'name': 'beachball', 'id': 71, 'frequency': 'r', 'synset': 'beach_ball.n.01'}, {'name': 'bead', 'id': 72, 'frequency': 'c', 'synset': 'bead.n.01'}, {'name': 'bean_curd', 'id': 73, 'frequency': 'c', 'synset': 'bean_curd.n.01'}, {'name': 'beanbag', 'id': 74, 'frequency': 'c', 'synset': 'beanbag.n.01'}, {'name': 'beanie', 'id': 75, 'frequency': 'f', 'synset': 'beanie.n.01'}, {'name': 'bear', 'id': 76, 'frequency': 'f', 'synset': 'bear.n.01'}, {'name': 'bed', 'id': 77, 'frequency': 'f', 'synset': 'bed.n.01'}, {'name': 'bedpan', 'id': 78, 'frequency': 'r', 'synset': 'bedpan.n.01'}, {'name': 'bedspread', 'id': 79, 'frequency': 'f', 'synset': 'bedspread.n.01'}, {'name': 'cow', 'id': 80, 'frequency': 'f', 'synset': 'beef.n.01'}, {'name': 'beef_(food)', 'id': 81, 'frequency': 'f', 'synset': 'beef.n.02'}, {'name': 'beeper', 'id': 82, 'frequency': 'r', 'synset': 'beeper.n.01'}, {'name': 'beer_bottle', 'id': 83, 'frequency': 'f', 'synset': 'beer_bottle.n.01'}, {'name': 'beer_can', 'id': 84, 'frequency': 'c', 'synset': 'beer_can.n.01'}, {'name': 'beetle', 'id': 85, 'frequency': 'r', 'synset': 'beetle.n.01'}, {'name': 'bell', 'id': 86, 'frequency': 'f', 'synset': 'bell.n.01'}, {'name': 'bell_pepper', 'id': 87, 'frequency': 'f', 'synset': 
'bell_pepper.n.02'}, {'name': 'belt', 'id': 88, 'frequency': 'f', 'synset': 'belt.n.02'}, {'name': 'belt_buckle', 'id': 89, 'frequency': 'f', 'synset': 'belt_buckle.n.01'}, {'name': 'bench', 'id': 90, 'frequency': 'f', 'synset': 'bench.n.01'}, {'name': 'beret', 'id': 91, 'frequency': 'c', 'synset': 'beret.n.01'}, {'name': 'bib', 'id': 92, 'frequency': 'c', 'synset': 'bib.n.02'}, {'name': 'Bible', 'id': 93, 'frequency': 'r', 'synset': 'bible.n.01'}, {'name': 'bicycle', 'id': 94, 'frequency': 'f', 'synset': 'bicycle.n.01'}, {'name': 'visor', 'id': 95, 'frequency': 'f', 'synset': 'bill.n.09'}, {'name': 'billboard', 'id': 96, 'frequency': 'f', 'synset': 'billboard.n.01'}, {'name': 'binder', 'id': 97, 'frequency': 'c', 'synset': 'binder.n.03'}, {'name': 'binoculars', 'id': 98, 'frequency': 'c', 'synset': 'binoculars.n.01'}, {'name': 'bird', 'id': 99, 'frequency': 'f', 'synset': 'bird.n.01'}, {'name': 'birdfeeder', 'id': 100, 'frequency': 'c', 'synset': 'bird_feeder.n.01'}, {'name': 'birdbath', 'id': 101, 'frequency': 'c', 'synset': 'birdbath.n.01'}, {'name': 'birdcage', 'id': 102, 'frequency': 'c', 'synset': 'birdcage.n.01'}, {'name': 'birdhouse', 'id': 103, 'frequency': 'c', 'synset': 'birdhouse.n.01'}, {'name': 'birthday_cake', 'id': 104, 'frequency': 'f', 'synset': 'birthday_cake.n.01'}, {'name': 'birthday_card', 'id': 105, 'frequency': 'r', 'synset': 'birthday_card.n.01'}, {'name': 'pirate_flag', 'id': 106, 'frequency': 'r', 'synset': 'black_flag.n.01'}, {'name': 'black_sheep', 'id': 107, 'frequency': 'c', 'synset': 'black_sheep.n.02'}, {'name': 'blackberry', 'id': 108, 'frequency': 'c', 'synset': 'blackberry.n.01'}, {'name': 'blackboard', 'id': 109, 'frequency': 'f', 'synset': 'blackboard.n.01'}, {'name': 'blanket', 'id': 110, 'frequency': 'f', 'synset': 'blanket.n.01'}, {'name': 'blazer', 'id': 111, 'frequency': 'c', 'synset': 'blazer.n.01'}, {'name': 'blender', 'id': 112, 'frequency': 'f', 'synset': 'blender.n.01'}, {'name': 'blimp', 'id': 113, 'frequency': 'r', 'synset': 'blimp.n.02'}, {'name': 'blinker', 'id': 114, 'frequency': 'f', 'synset': 'blinker.n.01'}, {'name': 'blouse', 'id': 115, 'frequency': 'f', 'synset': 'blouse.n.01'}, {'name': 'blueberry', 'id': 116, 'frequency': 'f', 'synset': 'blueberry.n.02'}, {'name': 'gameboard', 'id': 117, 'frequency': 'r', 'synset': 'board.n.09'}, {'name': 'boat', 'id': 118, 'frequency': 'f', 'synset': 'boat.n.01'}, {'name': 'bob', 'id': 119, 'frequency': 'r', 'synset': 'bob.n.05'}, {'name': 'bobbin', 'id': 120, 'frequency': 'c', 'synset': 'bobbin.n.01'}, {'name': 'bobby_pin', 'id': 121, 'frequency': 'c', 'synset': 'bobby_pin.n.01'}, {'name': 'boiled_egg', 'id': 122, 'frequency': 'c', 'synset': 'boiled_egg.n.01'}, {'name': 'bolo_tie', 'id': 123, 'frequency': 'r', 'synset': 'bolo_tie.n.01'}, {'name': 'deadbolt', 'id': 124, 'frequency': 'c', 'synset': 'bolt.n.03'}, {'name': 'bolt', 'id': 125, 'frequency': 'f', 'synset': 'bolt.n.06'}, {'name': 'bonnet', 'id': 126, 'frequency': 'r', 'synset': 'bonnet.n.01'}, {'name': 'book', 'id': 127, 'frequency': 'f', 'synset': 'book.n.01'}, {'name': 'bookcase', 'id': 128, 'frequency': 'c', 'synset': 'bookcase.n.01'}, {'name': 'booklet', 'id': 129, 'frequency': 'c', 'synset': 'booklet.n.01'}, {'name': 'bookmark', 'id': 130, 'frequency': 'r', 'synset': 'bookmark.n.01'}, {'name': 'boom_microphone', 'id': 131, 'frequency': 'r', 'synset': 'boom.n.04'}, {'name': 'boot', 'id': 132, 'frequency': 'f', 'synset': 'boot.n.01'}, {'name': 'bottle', 'id': 133, 'frequency': 'f', 'synset': 'bottle.n.01'}, {'name': 'bottle_opener', 
'id': 134, 'frequency': 'c', 'synset': 'bottle_opener.n.01'}, {'name': 'bouquet', 'id': 135, 'frequency': 'c', 'synset': 'bouquet.n.01'}, {'name': 'bow_(weapon)', 'id': 136, 'frequency': 'r', 'synset': 'bow.n.04'}, {'name': 'bow_(decorative_ribbons)', 'id': 137, 'frequency': 'f', 'synset': 'bow.n.08'}, {'name': 'bow-tie', 'id': 138, 'frequency': 'f', 'synset': 'bow_tie.n.01'}, {'name': 'bowl', 'id': 139, 'frequency': 'f', 'synset': 'bowl.n.03'}, {'name': 'pipe_bowl', 'id': 140, 'frequency': 'r', 'synset': 'bowl.n.08'}, {'name': 'bowler_hat', 'id': 141, 'frequency': 'c', 'synset': 'bowler_hat.n.01'}, {'name': 'bowling_ball', 'id': 142, 'frequency': 'r', 'synset': 'bowling_ball.n.01'}, {'name': 'box', 'id': 143, 'frequency': 'f', 'synset': 'box.n.01'}, {'name': 'boxing_glove', 'id': 144, 'frequency': 'r', 'synset': 'boxing_glove.n.01'}, {'name': 'suspenders', 'id': 145, 'frequency': 'c', 'synset': 'brace.n.06'}, {'name': 'bracelet', 'id': 146, 'frequency': 'f', 'synset': 'bracelet.n.02'}, {'name': 'brass_plaque', 'id': 147, 'frequency': 'r', 'synset': 'brass.n.07'}, {'name': 'brassiere', 'id': 148, 'frequency': 'c', 'synset': 'brassiere.n.01'}, {'name': 'bread-bin', 'id': 149, 'frequency': 'c', 'synset': 'bread-bin.n.01'}, {'name': 'bread', 'id': 150, 'frequency': 'f', 'synset': 'bread.n.01'}, {'name': 'breechcloth', 'id': 151, 'frequency': 'r', 'synset': 'breechcloth.n.01'}, {'name': 'bridal_gown', 'id': 152, 'frequency': 'f', 'synset': 'bridal_gown.n.01'}, {'name': 'briefcase', 'id': 153, 'frequency': 'c', 'synset': 'briefcase.n.01'}, {'name': 'broccoli', 'id': 154, 'frequency': 'f', 'synset': 'broccoli.n.01'}, {'name': 'broach', 'id': 155, 'frequency': 'r', 'synset': 'brooch.n.01'}, {'name': 'broom', 'id': 156, 'frequency': 'c', 'synset': 'broom.n.01'}, {'name': 'brownie', 'id': 157, 'frequency': 'c', 'synset': 'brownie.n.03'}, {'name': 'brussels_sprouts', 'id': 158, 'frequency': 'c', 'synset': 'brussels_sprouts.n.01'}, {'name': 'bubble_gum', 'id': 159, 'frequency': 'r', 'synset': 'bubble_gum.n.01'}, {'name': 'bucket', 'id': 160, 'frequency': 'f', 'synset': 'bucket.n.01'}, {'name': 'horse_buggy', 'id': 161, 'frequency': 'r', 'synset': 'buggy.n.01'}, {'name': 'bull', 'id': 162, 'frequency': 'c', 'synset': 'bull.n.11'}, {'name': 'bulldog', 'id': 163, 'frequency': 'c', 'synset': 'bulldog.n.01'}, {'name': 'bulldozer', 'id': 164, 'frequency': 'r', 'synset': 'bulldozer.n.01'}, {'name': 'bullet_train', 'id': 165, 'frequency': 'c', 'synset': 'bullet_train.n.01'}, {'name': 'bulletin_board', 'id': 166, 'frequency': 'c', 'synset': 'bulletin_board.n.02'}, {'name': 'bulletproof_vest', 'id': 167, 'frequency': 'r', 'synset': 'bulletproof_vest.n.01'}, {'name': 'bullhorn', 'id': 168, 'frequency': 'c', 'synset': 'bullhorn.n.01'}, {'name': 'bun', 'id': 169, 'frequency': 'f', 'synset': 'bun.n.01'}, {'name': 'bunk_bed', 'id': 170, 'frequency': 'c', 'synset': 'bunk_bed.n.01'}, {'name': 'buoy', 'id': 171, 'frequency': 'f', 'synset': 'buoy.n.01'}, {'name': 'burrito', 'id': 172, 'frequency': 'r', 'synset': 'burrito.n.01'}, {'name': 'bus_(vehicle)', 'id': 173, 'frequency': 'f', 'synset': 'bus.n.01'}, {'name': 'business_card', 'id': 174, 'frequency': 'c', 'synset': 'business_card.n.01'}, {'name': 'butter', 'id': 175, 'frequency': 'f', 'synset': 'butter.n.01'}, {'name': 'butterfly', 'id': 176, 'frequency': 'c', 'synset': 'butterfly.n.01'}, {'name': 'button', 'id': 177, 'frequency': 'f', 'synset': 'button.n.01'}, {'name': 'cab_(taxi)', 'id': 178, 'frequency': 'f', 'synset': 'cab.n.03'}, {'name': 'cabana', 'id': 179, 
'frequency': 'r', 'synset': 'cabana.n.01'}, {'name': 'cabin_car', 'id': 180, 'frequency': 'c', 'synset': 'cabin_car.n.01'}, {'name': 'cabinet', 'id': 181, 'frequency': 'f', 'synset': 'cabinet.n.01'}, {'name': 'locker', 'id': 182, 'frequency': 'r', 'synset': 'cabinet.n.03'}, {'name': 'cake', 'id': 183, 'frequency': 'f', 'synset': 'cake.n.03'}, {'name': 'calculator', 'id': 184, 'frequency': 'c', 'synset': 'calculator.n.02'}, {'name': 'calendar', 'id': 185, 'frequency': 'f', 'synset': 'calendar.n.02'}, {'name': 'calf', 'id': 186, 'frequency': 'c', 'synset': 'calf.n.01'}, {'name': 'camcorder', 'id': 187, 'frequency': 'c', 'synset': 'camcorder.n.01'}, {'name': 'camel', 'id': 188, 'frequency': 'c', 'synset': 'camel.n.01'}, {'name': 'camera', 'id': 189, 'frequency': 'f', 'synset': 'camera.n.01'}, {'name': 'camera_lens', 'id': 190, 'frequency': 'c', 'synset': 'camera_lens.n.01'}, {'name': 'camper_(vehicle)', 'id': 191, 'frequency': 'c', 'synset': 'camper.n.02'}, {'name': 'can', 'id': 192, 'frequency': 'f', 'synset': 'can.n.01'}, {'name': 'can_opener', 'id': 193, 'frequency': 'c', 'synset': 'can_opener.n.01'}, {'name': 'candle', 'id': 194, 'frequency': 'f', 'synset': 'candle.n.01'}, {'name': 'candle_holder', 'id': 195, 'frequency': 'f', 'synset': 'candlestick.n.01'}, {'name': 'candy_bar', 'id': 196, 'frequency': 'r', 'synset': 'candy_bar.n.01'}, {'name': 'candy_cane', 'id': 197, 'frequency': 'c', 'synset': 'candy_cane.n.01'}, {'name': 'walking_cane', 'id': 198, 'frequency': 'c', 'synset': 'cane.n.01'}, {'name': 'canister', 'id': 199, 'frequency': 'c', 'synset': 'canister.n.02'}, {'name': 'canoe', 'id': 200, 'frequency': 'c', 'synset': 'canoe.n.01'}, {'name': 'cantaloup', 'id': 201, 'frequency': 'c', 'synset': 'cantaloup.n.02'}, {'name': 'canteen', 'id': 202, 'frequency': 'r', 'synset': 'canteen.n.01'}, {'name': 'cap_(headwear)', 'id': 203, 'frequency': 'f', 'synset': 'cap.n.01'}, {'name': 'bottle_cap', 'id': 204, 'frequency': 'f', 'synset': 'cap.n.02'}, {'name': 'cape', 'id': 205, 'frequency': 'c', 'synset': 'cape.n.02'}, {'name': 'cappuccino', 'id': 206, 'frequency': 'c', 'synset': 'cappuccino.n.01'}, {'name': 'car_(automobile)', 'id': 207, 'frequency': 'f', 'synset': 'car.n.01'}, {'name': 'railcar_(part_of_a_train)', 'id': 208, 'frequency': 'f', 'synset': 'car.n.02'}, {'name': 'elevator_car', 'id': 209, 'frequency': 'r', 'synset': 'car.n.04'}, {'name': 'car_battery', 'id': 210, 'frequency': 'r', 'synset': 'car_battery.n.01'}, {'name': 'identity_card', 'id': 211, 'frequency': 'c', 'synset': 'card.n.02'}, {'name': 'card', 'id': 212, 'frequency': 'c', 'synset': 'card.n.03'}, {'name': 'cardigan', 'id': 213, 'frequency': 'c', 'synset': 'cardigan.n.01'}, {'name': 'cargo_ship', 'id': 214, 'frequency': 'r', 'synset': 'cargo_ship.n.01'}, {'name': 'carnation', 'id': 215, 'frequency': 'r', 'synset': 'carnation.n.01'}, {'name': 'horse_carriage', 'id': 216, 'frequency': 'c', 'synset': 'carriage.n.02'}, {'name': 'carrot', 'id': 217, 'frequency': 'f', 'synset': 'carrot.n.01'}, {'name': 'tote_bag', 'id': 218, 'frequency': 'f', 'synset': 'carryall.n.01'}, {'name': 'cart', 'id': 219, 'frequency': 'c', 'synset': 'cart.n.01'}, {'name': 'carton', 'id': 220, 'frequency': 'c', 'synset': 'carton.n.02'}, {'name': 'cash_register', 'id': 221, 'frequency': 'c', 'synset': 'cash_register.n.01'}, {'name': 'casserole', 'id': 222, 'frequency': 'r', 'synset': 'casserole.n.01'}, {'name': 'cassette', 'id': 223, 'frequency': 'r', 'synset': 'cassette.n.01'}, {'name': 'cast', 'id': 224, 'frequency': 'c', 'synset': 'cast.n.05'}, 
{'name': 'cat', 'id': 225, 'frequency': 'f', 'synset': 'cat.n.01'}, {'name': 'cauliflower', 'id': 226, 'frequency': 'f', 'synset': 'cauliflower.n.02'}, {'name': 'cayenne_(spice)', 'id': 227, 'frequency': 'c', 'synset': 'cayenne.n.02'}, {'name': 'CD_player', 'id': 228, 'frequency': 'c', 'synset': 'cd_player.n.01'}, {'name': 'celery', 'id': 229, 'frequency': 'f', 'synset': 'celery.n.01'}, {'name': 'cellular_telephone', 'id': 230, 'frequency': 'f', 'synset': 'cellular_telephone.n.01'}, {'name': 'chain_mail', 'id': 231, 'frequency': 'r', 'synset': 'chain_mail.n.01'}, {'name': 'chair', 'id': 232, 'frequency': 'f', 'synset': 'chair.n.01'}, {'name': 'chaise_longue', 'id': 233, 'frequency': 'r', 'synset': 'chaise_longue.n.01'}, {'name': 'chalice', 'id': 234, 'frequency': 'r', 'synset': 'chalice.n.01'}, {'name': 'chandelier', 'id': 235, 'frequency': 'f', 'synset': 'chandelier.n.01'}, {'name': 'chap', 'id': 236, 'frequency': 'r', 'synset': 'chap.n.04'}, {'name': 'checkbook', 'id': 237, 'frequency': 'r', 'synset': 'checkbook.n.01'}, {'name': 'checkerboard', 'id': 238, 'frequency': 'r', 'synset': 'checkerboard.n.01'}, {'name': 'cherry', 'id': 239, 'frequency': 'c', 'synset': 'cherry.n.03'}, {'name': 'chessboard', 'id': 240, 'frequency': 'r', 'synset': 'chessboard.n.01'}, {'name': 'chicken_(animal)', 'id': 241, 'frequency': 'c', 'synset': 'chicken.n.02'}, {'name': 'chickpea', 'id': 242, 'frequency': 'c', 'synset': 'chickpea.n.01'}, {'name': 'chili_(vegetable)', 'id': 243, 'frequency': 'c', 'synset': 'chili.n.02'}, {'name': 'chime', 'id': 244, 'frequency': 'r', 'synset': 'chime.n.01'}, {'name': 'chinaware', 'id': 245, 'frequency': 'r', 'synset': 'chinaware.n.01'}, {'name': 'crisp_(potato_chip)', 'id': 246, 'frequency': 'c', 'synset': 'chip.n.04'}, {'name': 'poker_chip', 'id': 247, 'frequency': 'r', 'synset': 'chip.n.06'}, {'name': 'chocolate_bar', 'id': 248, 'frequency': 'c', 'synset': 'chocolate_bar.n.01'}, {'name': 'chocolate_cake', 'id': 249, 'frequency': 'c', 'synset': 'chocolate_cake.n.01'}, {'name': 'chocolate_milk', 'id': 250, 'frequency': 'r', 'synset': 'chocolate_milk.n.01'}, {'name': 'chocolate_mousse', 'id': 251, 'frequency': 'r', 'synset': 'chocolate_mousse.n.01'}, {'name': 'choker', 'id': 252, 'frequency': 'f', 'synset': 'choker.n.03'}, {'name': 'chopping_board', 'id': 253, 'frequency': 'f', 'synset': 'chopping_board.n.01'}, {'name': 'chopstick', 'id': 254, 'frequency': 'f', 'synset': 'chopstick.n.01'}, {'name': 'Christmas_tree', 'id': 255, 'frequency': 'f', 'synset': 'christmas_tree.n.05'}, {'name': 'slide', 'id': 256, 'frequency': 'c', 'synset': 'chute.n.02'}, {'name': 'cider', 'id': 257, 'frequency': 'r', 'synset': 'cider.n.01'}, {'name': 'cigar_box', 'id': 258, 'frequency': 'r', 'synset': 'cigar_box.n.01'}, {'name': 'cigarette', 'id': 259, 'frequency': 'f', 'synset': 'cigarette.n.01'}, {'name': 'cigarette_case', 'id': 260, 'frequency': 'c', 'synset': 'cigarette_case.n.01'}, {'name': 'cistern', 'id': 261, 'frequency': 'f', 'synset': 'cistern.n.02'}, {'name': 'clarinet', 'id': 262, 'frequency': 'r', 'synset': 'clarinet.n.01'}, {'name': 'clasp', 'id': 263, 'frequency': 'c', 'synset': 'clasp.n.01'}, {'name': 'cleansing_agent', 'id': 264, 'frequency': 'c', 'synset': 'cleansing_agent.n.01'}, {'name': 'cleat_(for_securing_rope)', 'id': 265, 'frequency': 'r', 'synset': 'cleat.n.02'}, {'name': 'clementine', 'id': 266, 'frequency': 'r', 'synset': 'clementine.n.01'}, {'name': 'clip', 'id': 267, 'frequency': 'c', 'synset': 'clip.n.03'}, {'name': 'clipboard', 'id': 268, 'frequency': 'c', 'synset': 
'clipboard.n.01'}, {'name': 'clippers_(for_plants)', 'id': 269, 'frequency': 'r', 'synset': 'clipper.n.03'}, {'name': 'cloak', 'id': 270, 'frequency': 'r', 'synset': 'cloak.n.02'}, {'name': 'clock', 'id': 271, 'frequency': 'f', 'synset': 'clock.n.01'}, {'name': 'clock_tower', 'id': 272, 'frequency': 'f', 'synset': 'clock_tower.n.01'}, {'name': 'clothes_hamper', 'id': 273, 'frequency': 'c', 'synset': 'clothes_hamper.n.01'}, {'name': 'clothespin', 'id': 274, 'frequency': 'c', 'synset': 'clothespin.n.01'}, {'name': 'clutch_bag', 'id': 275, 'frequency': 'r', 'synset': 'clutch_bag.n.01'}, {'name': 'coaster', 'id': 276, 'frequency': 'f', 'synset': 'coaster.n.03'}, {'name': 'coat', 'id': 277, 'frequency': 'f', 'synset': 'coat.n.01'}, {'name': 'coat_hanger', 'id': 278, 'frequency': 'c', 'synset': 'coat_hanger.n.01'}, {'name': 'coatrack', 'id': 279, 'frequency': 'c', 'synset': 'coatrack.n.01'}, {'name': 'cock', 'id': 280, 'frequency': 'c', 'synset': 'cock.n.04'}, {'name': 'cockroach', 'id': 281, 'frequency': 'r', 'synset': 'cockroach.n.01'}, {'name': 'cocoa_(beverage)', 'id': 282, 'frequency': 'r', 'synset': 'cocoa.n.01'}, {'name': 'coconut', 'id': 283, 'frequency': 'c', 'synset': 'coconut.n.02'}, {'name': 'coffee_maker', 'id': 284, 'frequency': 'f', 'synset': 'coffee_maker.n.01'}, {'name': 'coffee_table', 'id': 285, 'frequency': 'f', 'synset': 'coffee_table.n.01'}, {'name': 'coffeepot', 'id': 286, 'frequency': 'c', 'synset': 'coffeepot.n.01'}, {'name': 'coil', 'id': 287, 'frequency': 'r', 'synset': 'coil.n.05'}, {'name': 'coin', 'id': 288, 'frequency': 'c', 'synset': 'coin.n.01'}, {'name': 'colander', 'id': 289, 'frequency': 'c', 'synset': 'colander.n.01'}, {'name': 'coleslaw', 'id': 290, 'frequency': 'c', 'synset': 'coleslaw.n.01'}, {'name': 'coloring_material', 'id': 291, 'frequency': 'r', 'synset': 'coloring_material.n.01'}, {'name': 'combination_lock', 'id': 292, 'frequency': 'r', 'synset': 'combination_lock.n.01'}, {'name': 'pacifier', 'id': 293, 'frequency': 'c', 'synset': 'comforter.n.04'}, {'name': 'comic_book', 'id': 294, 'frequency': 'r', 'synset': 'comic_book.n.01'}, {'name': 'compass', 'id': 295, 'frequency': 'r', 'synset': 'compass.n.01'}, {'name': 'computer_keyboard', 'id': 296, 'frequency': 'f', 'synset': 'computer_keyboard.n.01'}, {'name': 'condiment', 'id': 297, 'frequency': 'f', 'synset': 'condiment.n.01'}, {'name': 'cone', 'id': 298, 'frequency': 'f', 'synset': 'cone.n.01'}, {'name': 'control', 'id': 299, 'frequency': 'f', 'synset': 'control.n.09'}, {'name': 'convertible_(automobile)', 'id': 300, 'frequency': 'r', 'synset': 'convertible.n.01'}, {'name': 'sofa_bed', 'id': 301, 'frequency': 'r', 'synset': 'convertible.n.03'}, {'name': 'cooker', 'id': 302, 'frequency': 'r', 'synset': 'cooker.n.01'}, {'name': 'cookie', 'id': 303, 'frequency': 'f', 'synset': 'cookie.n.01'}, {'name': 'cooking_utensil', 'id': 304, 'frequency': 'r', 'synset': 'cooking_utensil.n.01'}, {'name': 'cooler_(for_food)', 'id': 305, 'frequency': 'f', 'synset': 'cooler.n.01'}, {'name': 'cork_(bottle_plug)', 'id': 306, 'frequency': 'f', 'synset': 'cork.n.04'}, {'name': 'corkboard', 'id': 307, 'frequency': 'r', 'synset': 'corkboard.n.01'}, {'name': 'corkscrew', 'id': 308, 'frequency': 'c', 'synset': 'corkscrew.n.01'}, {'name': 'edible_corn', 'id': 309, 'frequency': 'f', 'synset': 'corn.n.03'}, {'name': 'cornbread', 'id': 310, 'frequency': 'r', 'synset': 'cornbread.n.01'}, {'name': 'cornet', 'id': 311, 'frequency': 'c', 'synset': 'cornet.n.01'}, {'name': 'cornice', 'id': 312, 'frequency': 'c', 'synset': 
'cornice.n.01'}, {'name': 'cornmeal', 'id': 313, 'frequency': 'r', 'synset': 'cornmeal.n.01'}, {'name': 'corset', 'id': 314, 'frequency': 'c', 'synset': 'corset.n.01'}, {'name': 'costume', 'id': 315, 'frequency': 'c', 'synset': 'costume.n.04'}, {'name': 'cougar', 'id': 316, 'frequency': 'r', 'synset': 'cougar.n.01'}, {'name': 'coverall', 'id': 317, 'frequency': 'r', 'synset': 'coverall.n.01'}, {'name': 'cowbell', 'id': 318, 'frequency': 'c', 'synset': 'cowbell.n.01'}, {'name': 'cowboy_hat', 'id': 319, 'frequency': 'f', 'synset': 'cowboy_hat.n.01'}, {'name': 'crab_(animal)', 'id': 320, 'frequency': 'c', 'synset': 'crab.n.01'}, {'name': 'crabmeat', 'id': 321, 'frequency': 'r', 'synset': 'crab.n.05'}, {'name': 'cracker', 'id': 322, 'frequency': 'c', 'synset': 'cracker.n.01'}, {'name': 'crape', 'id': 323, 'frequency': 'r', 'synset': 'crape.n.01'}, {'name': 'crate', 'id': 324, 'frequency': 'f', 'synset': 'crate.n.01'}, {'name': 'crayon', 'id': 325, 'frequency': 'c', 'synset': 'crayon.n.01'}, {'name': 'cream_pitcher', 'id': 326, 'frequency': 'r', 'synset': 'cream_pitcher.n.01'}, {'name': 'crescent_roll', 'id': 327, 'frequency': 'c', 'synset': 'crescent_roll.n.01'}, {'name': 'crib', 'id': 328, 'frequency': 'c', 'synset': 'crib.n.01'}, {'name': 'crock_pot', 'id': 329, 'frequency': 'c', 'synset': 'crock.n.03'}, {'name': 'crossbar', 'id': 330, 'frequency': 'f', 'synset': 'crossbar.n.01'}, {'name': 'crouton', 'id': 331, 'frequency': 'r', 'synset': 'crouton.n.01'}, {'name': 'crow', 'id': 332, 'frequency': 'c', 'synset': 'crow.n.01'}, {'name': 'crowbar', 'id': 333, 'frequency': 'r', 'synset': 'crowbar.n.01'}, {'name': 'crown', 'id': 334, 'frequency': 'c', 'synset': 'crown.n.04'}, {'name': 'crucifix', 'id': 335, 'frequency': 'c', 'synset': 'crucifix.n.01'}, {'name': 'cruise_ship', 'id': 336, 'frequency': 'c', 'synset': 'cruise_ship.n.01'}, {'name': 'police_cruiser', 'id': 337, 'frequency': 'c', 'synset': 'cruiser.n.01'}, {'name': 'crumb', 'id': 338, 'frequency': 'f', 'synset': 'crumb.n.03'}, {'name': 'crutch', 'id': 339, 'frequency': 'c', 'synset': 'crutch.n.01'}, {'name': 'cub_(animal)', 'id': 340, 'frequency': 'c', 'synset': 'cub.n.03'}, {'name': 'cube', 'id': 341, 'frequency': 'c', 'synset': 'cube.n.05'}, {'name': 'cucumber', 'id': 342, 'frequency': 'f', 'synset': 'cucumber.n.02'}, {'name': 'cufflink', 'id': 343, 'frequency': 'c', 'synset': 'cufflink.n.01'}, {'name': 'cup', 'id': 344, 'frequency': 'f', 'synset': 'cup.n.01'}, {'name': 'trophy_cup', 'id': 345, 'frequency': 'c', 'synset': 'cup.n.08'}, {'name': 'cupboard', 'id': 346, 'frequency': 'f', 'synset': 'cupboard.n.01'}, {'name': 'cupcake', 'id': 347, 'frequency': 'f', 'synset': 'cupcake.n.01'}, {'name': 'hair_curler', 'id': 348, 'frequency': 'r', 'synset': 'curler.n.01'}, {'name': 'curling_iron', 'id': 349, 'frequency': 'r', 'synset': 'curling_iron.n.01'}, {'name': 'curtain', 'id': 350, 'frequency': 'f', 'synset': 'curtain.n.01'}, {'name': 'cushion', 'id': 351, 'frequency': 'f', 'synset': 'cushion.n.03'}, {'name': 'cylinder', 'id': 352, 'frequency': 'r', 'synset': 'cylinder.n.04'}, {'name': 'cymbal', 'id': 353, 'frequency': 'r', 'synset': 'cymbal.n.01'}, {'name': 'dagger', 'id': 354, 'frequency': 'r', 'synset': 'dagger.n.01'}, {'name': 'dalmatian', 'id': 355, 'frequency': 'r', 'synset': 'dalmatian.n.02'}, {'name': 'dartboard', 'id': 356, 'frequency': 'c', 'synset': 'dartboard.n.01'}, {'name': 'date_(fruit)', 'id': 357, 'frequency': 'r', 'synset': 'date.n.08'}, {'name': 'deck_chair', 'id': 358, 'frequency': 'f', 'synset': 'deck_chair.n.01'}, 
{'name': 'deer', 'id': 359, 'frequency': 'c', 'synset': 'deer.n.01'}, {'name': 'dental_floss', 'id': 360, 'frequency': 'c', 'synset': 'dental_floss.n.01'}, {'name': 'desk', 'id': 361, 'frequency': 'f', 'synset': 'desk.n.01'}, {'name': 'detergent', 'id': 362, 'frequency': 'r', 'synset': 'detergent.n.01'}, {'name': 'diaper', 'id': 363, 'frequency': 'c', 'synset': 'diaper.n.01'}, {'name': 'diary', 'id': 364, 'frequency': 'r', 'synset': 'diary.n.01'}, {'name': 'die', 'id': 365, 'frequency': 'r', 'synset': 'die.n.01'}, {'name': 'dinghy', 'id': 366, 'frequency': 'r', 'synset': 'dinghy.n.01'}, {'name': 'dining_table', 'id': 367, 'frequency': 'f', 'synset': 'dining_table.n.01'}, {'name': 'tux', 'id': 368, 'frequency': 'r', 'synset': 'dinner_jacket.n.01'}, {'name': 'dish', 'id': 369, 'frequency': 'f', 'synset': 'dish.n.01'}, {'name': 'dish_antenna', 'id': 370, 'frequency': 'c', 'synset': 'dish.n.05'}, {'name': 'dishrag', 'id': 371, 'frequency': 'c', 'synset': 'dishrag.n.01'}, {'name': 'dishtowel', 'id': 372, 'frequency': 'f', 'synset': 'dishtowel.n.01'}, {'name': 'dishwasher', 'id': 373, 'frequency': 'f', 'synset': 'dishwasher.n.01'}, {'name': 'dishwasher_detergent', 'id': 374, 'frequency': 'r', 'synset': 'dishwasher_detergent.n.01'}, {'name': 'dispenser', 'id': 375, 'frequency': 'f', 'synset': 'dispenser.n.01'}, {'name': 'diving_board', 'id': 376, 'frequency': 'r', 'synset': 'diving_board.n.01'}, {'name': 'Dixie_cup', 'id': 377, 'frequency': 'f', 'synset': 'dixie_cup.n.01'}, {'name': 'dog', 'id': 378, 'frequency': 'f', 'synset': 'dog.n.01'}, {'name': 'dog_collar', 'id': 379, 'frequency': 'f', 'synset': 'dog_collar.n.01'}, {'name': 'doll', 'id': 380, 'frequency': 'f', 'synset': 'doll.n.01'}, {'name': 'dollar', 'id': 381, 'frequency': 'r', 'synset': 'dollar.n.02'}, {'name': 'dollhouse', 'id': 382, 'frequency': 'r', 'synset': 'dollhouse.n.01'}, {'name': 'dolphin', 'id': 383, 'frequency': 'c', 'synset': 'dolphin.n.02'}, {'name': 'domestic_ass', 'id': 384, 'frequency': 'c', 'synset': 'domestic_ass.n.01'}, {'name': 'doorknob', 'id': 385, 'frequency': 'f', 'synset': 'doorknob.n.01'}, {'name': 'doormat', 'id': 386, 'frequency': 'c', 'synset': 'doormat.n.02'}, {'name': 'doughnut', 'id': 387, 'frequency': 'f', 'synset': 'doughnut.n.02'}, {'name': 'dove', 'id': 388, 'frequency': 'r', 'synset': 'dove.n.01'}, {'name': 'dragonfly', 'id': 389, 'frequency': 'r', 'synset': 'dragonfly.n.01'}, {'name': 'drawer', 'id': 390, 'frequency': 'f', 'synset': 'drawer.n.01'}, {'name': 'underdrawers', 'id': 391, 'frequency': 'c', 'synset': 'drawers.n.01'}, {'name': 'dress', 'id': 392, 'frequency': 'f', 'synset': 'dress.n.01'}, {'name': 'dress_hat', 'id': 393, 'frequency': 'c', 'synset': 'dress_hat.n.01'}, {'name': 'dress_suit', 'id': 394, 'frequency': 'f', 'synset': 'dress_suit.n.01'}, {'name': 'dresser', 'id': 395, 'frequency': 'f', 'synset': 'dresser.n.05'}, {'name': 'drill', 'id': 396, 'frequency': 'c', 'synset': 'drill.n.01'}, {'name': 'drone', 'id': 397, 'frequency': 'r', 'synset': 'drone.n.04'}, {'name': 'dropper', 'id': 398, 'frequency': 'r', 'synset': 'dropper.n.01'}, {'name': 'drum_(musical_instrument)', 'id': 399, 'frequency': 'c', 'synset': 'drum.n.01'}, {'name': 'drumstick', 'id': 400, 'frequency': 'r', 'synset': 'drumstick.n.02'}, {'name': 'duck', 'id': 401, 'frequency': 'f', 'synset': 'duck.n.01'}, {'name': 'duckling', 'id': 402, 'frequency': 'c', 'synset': 'duckling.n.02'}, {'name': 'duct_tape', 'id': 403, 'frequency': 'c', 'synset': 'duct_tape.n.01'}, {'name': 'duffel_bag', 'id': 404, 'frequency': 'f', 
'synset': 'duffel_bag.n.01'}, {'name': 'dumbbell', 'id': 405, 'frequency': 'r', 'synset': 'dumbbell.n.01'}, {'name': 'dumpster', 'id': 406, 'frequency': 'c', 'synset': 'dumpster.n.01'}, {'name': 'dustpan', 'id': 407, 'frequency': 'r', 'synset': 'dustpan.n.02'}, {'name': 'eagle', 'id': 408, 'frequency': 'c', 'synset': 'eagle.n.01'}, {'name': 'earphone', 'id': 409, 'frequency': 'f', 'synset': 'earphone.n.01'}, {'name': 'earplug', 'id': 410, 'frequency': 'r', 'synset': 'earplug.n.01'}, {'name': 'earring', 'id': 411, 'frequency': 'f', 'synset': 'earring.n.01'}, {'name': 'easel', 'id': 412, 'frequency': 'c', 'synset': 'easel.n.01'}, {'name': 'eclair', 'id': 413, 'frequency': 'r', 'synset': 'eclair.n.01'}, {'name': 'eel', 'id': 414, 'frequency': 'r', 'synset': 'eel.n.01'}, {'name': 'egg', 'id': 415, 'frequency': 'f', 'synset': 'egg.n.02'}, {'name': 'egg_roll', 'id': 416, 'frequency': 'r', 'synset': 'egg_roll.n.01'}, {'name': 'egg_yolk', 'id': 417, 'frequency': 'c', 'synset': 'egg_yolk.n.01'}, {'name': 'eggbeater', 'id': 418, 'frequency': 'c', 'synset': 'eggbeater.n.02'}, {'name': 'eggplant', 'id': 419, 'frequency': 'c', 'synset': 'eggplant.n.01'}, {'name': 'electric_chair', 'id': 420, 'frequency': 'r', 'synset': 'electric_chair.n.01'}, {'name': 'refrigerator', 'id': 421, 'frequency': 'f', 'synset': 'electric_refrigerator.n.01'}, {'name': 'elephant', 'id': 422, 'frequency': 'f', 'synset': 'elephant.n.01'}, {'name': 'elk', 'id': 423, 'frequency': 'c', 'synset': 'elk.n.01'}, {'name': 'envelope', 'id': 424, 'frequency': 'c', 'synset': 'envelope.n.01'}, {'name': 'eraser', 'id': 425, 'frequency': 'c', 'synset': 'eraser.n.01'}, {'name': 'escargot', 'id': 426, 'frequency': 'r', 'synset': 'escargot.n.01'}, {'name': 'eyepatch', 'id': 427, 'frequency': 'r', 'synset': 'eyepatch.n.01'}, {'name': 'falcon', 'id': 428, 'frequency': 'r', 'synset': 'falcon.n.01'}, {'name': 'fan', 'id': 429, 'frequency': 'f', 'synset': 'fan.n.01'}, {'name': 'faucet', 'id': 430, 'frequency': 'f', 'synset': 'faucet.n.01'}, {'name': 'fedora', 'id': 431, 'frequency': 'r', 'synset': 'fedora.n.01'}, {'name': 'ferret', 'id': 432, 'frequency': 'r', 'synset': 'ferret.n.02'}, {'name': 'Ferris_wheel', 'id': 433, 'frequency': 'c', 'synset': 'ferris_wheel.n.01'}, {'name': 'ferry', 'id': 434, 'frequency': 'c', 'synset': 'ferry.n.01'}, {'name': 'fig_(fruit)', 'id': 435, 'frequency': 'r', 'synset': 'fig.n.04'}, {'name': 'fighter_jet', 'id': 436, 'frequency': 'c', 'synset': 'fighter.n.02'}, {'name': 'figurine', 'id': 437, 'frequency': 'f', 'synset': 'figurine.n.01'}, {'name': 'file_cabinet', 'id': 438, 'frequency': 'c', 'synset': 'file.n.03'}, {'name': 'file_(tool)', 'id': 439, 'frequency': 'r', 'synset': 'file.n.04'}, {'name': 'fire_alarm', 'id': 440, 'frequency': 'f', 'synset': 'fire_alarm.n.02'}, {'name': 'fire_engine', 'id': 441, 'frequency': 'f', 'synset': 'fire_engine.n.01'}, {'name': 'fire_extinguisher', 'id': 442, 'frequency': 'f', 'synset': 'fire_extinguisher.n.01'}, {'name': 'fire_hose', 'id': 443, 'frequency': 'c', 'synset': 'fire_hose.n.01'}, {'name': 'fireplace', 'id': 444, 'frequency': 'f', 'synset': 'fireplace.n.01'}, {'name': 'fireplug', 'id': 445, 'frequency': 'f', 'synset': 'fireplug.n.01'}, {'name': 'first-aid_kit', 'id': 446, 'frequency': 'r', 'synset': 'first-aid_kit.n.01'}, {'name': 'fish', 'id': 447, 'frequency': 'f', 'synset': 'fish.n.01'}, {'name': 'fish_(food)', 'id': 448, 'frequency': 'c', 'synset': 'fish.n.02'}, {'name': 'fishbowl', 'id': 449, 'frequency': 'r', 'synset': 'fishbowl.n.02'}, {'name': 'fishing_rod', 'id': 
450, 'frequency': 'c', 'synset': 'fishing_rod.n.01'}, {'name': 'flag', 'id': 451, 'frequency': 'f', 'synset': 'flag.n.01'}, {'name': 'flagpole', 'id': 452, 'frequency': 'f', 'synset': 'flagpole.n.02'}, {'name': 'flamingo', 'id': 453, 'frequency': 'c', 'synset': 'flamingo.n.01'}, {'name': 'flannel', 'id': 454, 'frequency': 'c', 'synset': 'flannel.n.01'}, {'name': 'flap', 'id': 455, 'frequency': 'c', 'synset': 'flap.n.01'}, {'name': 'flash', 'id': 456, 'frequency': 'r', 'synset': 'flash.n.10'}, {'name': 'flashlight', 'id': 457, 'frequency': 'c', 'synset': 'flashlight.n.01'}, {'name': 'fleece', 'id': 458, 'frequency': 'r', 'synset': 'fleece.n.03'}, {'name': 'flip-flop_(sandal)', 'id': 459, 'frequency': 'f', 'synset': 'flip-flop.n.02'}, {'name': 'flipper_(footwear)', 'id': 460, 'frequency': 'c', 'synset': 'flipper.n.01'}, {'name': 'flower_arrangement', 'id': 461, 'frequency': 'f', 'synset': 'flower_arrangement.n.01'}, {'name': 'flute_glass', 'id': 462, 'frequency': 'c', 'synset': 'flute.n.02'}, {'name': 'foal', 'id': 463, 'frequency': 'c', 'synset': 'foal.n.01'}, {'name': 'folding_chair', 'id': 464, 'frequency': 'c', 'synset': 'folding_chair.n.01'}, {'name': 'food_processor', 'id': 465, 'frequency': 'c', 'synset': 'food_processor.n.01'}, {'name': 'football_(American)', 'id': 466, 'frequency': 'c', 'synset': 'football.n.02'}, {'name': 'football_helmet', 'id': 467, 'frequency': 'r', 'synset': 'football_helmet.n.01'}, {'name': 'footstool', 'id': 468, 'frequency': 'c', 'synset': 'footstool.n.01'}, {'name': 'fork', 'id': 469, 'frequency': 'f', 'synset': 'fork.n.01'}, {'name': 'forklift', 'id': 470, 'frequency': 'c', 'synset': 'forklift.n.01'}, {'name': 'freight_car', 'id': 471, 'frequency': 'c', 'synset': 'freight_car.n.01'}, {'name': 'French_toast', 'id': 472, 'frequency': 'c', 'synset': 'french_toast.n.01'}, {'name': 'freshener', 'id': 473, 'frequency': 'c', 'synset': 'freshener.n.01'}, {'name': 'frisbee', 'id': 474, 'frequency': 'f', 'synset': 'frisbee.n.01'}, {'name': 'frog', 'id': 475, 'frequency': 'c', 'synset': 'frog.n.01'}, {'name': 'fruit_juice', 'id': 476, 'frequency': 'c', 'synset': 'fruit_juice.n.01'}, {'name': 'frying_pan', 'id': 477, 'frequency': 'f', 'synset': 'frying_pan.n.01'}, {'name': 'fudge', 'id': 478, 'frequency': 'r', 'synset': 'fudge.n.01'}, {'name': 'funnel', 'id': 479, 'frequency': 'r', 'synset': 'funnel.n.02'}, {'name': 'futon', 'id': 480, 'frequency': 'r', 'synset': 'futon.n.01'}, {'name': 'gag', 'id': 481, 'frequency': 'r', 'synset': 'gag.n.02'}, {'name': 'garbage', 'id': 482, 'frequency': 'r', 'synset': 'garbage.n.03'}, {'name': 'garbage_truck', 'id': 483, 'frequency': 'c', 'synset': 'garbage_truck.n.01'}, {'name': 'garden_hose', 'id': 484, 'frequency': 'c', 'synset': 'garden_hose.n.01'}, {'name': 'gargle', 'id': 485, 'frequency': 'c', 'synset': 'gargle.n.01'}, {'name': 'gargoyle', 'id': 486, 'frequency': 'r', 'synset': 'gargoyle.n.02'}, {'name': 'garlic', 'id': 487, 'frequency': 'c', 'synset': 'garlic.n.02'}, {'name': 'gasmask', 'id': 488, 'frequency': 'r', 'synset': 'gasmask.n.01'}, {'name': 'gazelle', 'id': 489, 'frequency': 'c', 'synset': 'gazelle.n.01'}, {'name': 'gelatin', 'id': 490, 'frequency': 'c', 'synset': 'gelatin.n.02'}, {'name': 'gemstone', 'id': 491, 'frequency': 'r', 'synset': 'gem.n.02'}, {'name': 'generator', 'id': 492, 'frequency': 'r', 'synset': 'generator.n.02'}, {'name': 'giant_panda', 'id': 493, 'frequency': 'c', 'synset': 'giant_panda.n.01'}, {'name': 'gift_wrap', 'id': 494, 'frequency': 'c', 'synset': 'gift_wrap.n.01'}, {'name': 'ginger', 'id': 
495, 'frequency': 'c', 'synset': 'ginger.n.03'}, {'name': 'giraffe', 'id': 496, 'frequency': 'f', 'synset': 'giraffe.n.01'}, {'name': 'cincture', 'id': 497, 'frequency': 'c', 'synset': 'girdle.n.02'}, {'name': 'glass_(drink_container)', 'id': 498, 'frequency': 'f', 'synset': 'glass.n.02'}, {'name': 'globe', 'id': 499, 'frequency': 'c', 'synset': 'globe.n.03'}, {'name': 'glove', 'id': 500, 'frequency': 'f', 'synset': 'glove.n.02'}, {'name': 'goat', 'id': 501, 'frequency': 'c', 'synset': 'goat.n.01'}, {'name': 'goggles', 'id': 502, 'frequency': 'f', 'synset': 'goggles.n.01'}, {'name': 'goldfish', 'id': 503, 'frequency': 'r', 'synset': 'goldfish.n.01'}, {'name': 'golf_club', 'id': 504, 'frequency': 'c', 'synset': 'golf_club.n.02'}, {'name': 'golfcart', 'id': 505, 'frequency': 'c', 'synset': 'golfcart.n.01'}, {'name': 'gondola_(boat)', 'id': 506, 'frequency': 'r', 'synset': 'gondola.n.02'}, {'name': 'goose', 'id': 507, 'frequency': 'c', 'synset': 'goose.n.01'}, {'name': 'gorilla', 'id': 508, 'frequency': 'r', 'synset': 'gorilla.n.01'}, {'name': 'gourd', 'id': 509, 'frequency': 'r', 'synset': 'gourd.n.02'}, {'name': 'grape', 'id': 510, 'frequency': 'f', 'synset': 'grape.n.01'}, {'name': 'grater', 'id': 511, 'frequency': 'c', 'synset': 'grater.n.01'}, {'name': 'gravestone', 'id': 512, 'frequency': 'c', 'synset': 'gravestone.n.01'}, {'name': 'gravy_boat', 'id': 513, 'frequency': 'r', 'synset': 'gravy_boat.n.01'}, {'name': 'green_bean', 'id': 514, 'frequency': 'f', 'synset': 'green_bean.n.02'}, {'name': 'green_onion', 'id': 515, 'frequency': 'f', 'synset': 'green_onion.n.01'}, {'name': 'griddle', 'id': 516, 'frequency': 'r', 'synset': 'griddle.n.01'}, {'name': 'grill', 'id': 517, 'frequency': 'f', 'synset': 'grill.n.02'}, {'name': 'grits', 'id': 518, 'frequency': 'r', 'synset': 'grits.n.01'}, {'name': 'grizzly', 'id': 519, 'frequency': 'c', 'synset': 'grizzly.n.01'}, {'name': 'grocery_bag', 'id': 520, 'frequency': 'c', 'synset': 'grocery_bag.n.01'}, {'name': 'guitar', 'id': 521, 'frequency': 'f', 'synset': 'guitar.n.01'}, {'name': 'gull', 'id': 522, 'frequency': 'c', 'synset': 'gull.n.02'}, {'name': 'gun', 'id': 523, 'frequency': 'c', 'synset': 'gun.n.01'}, {'name': 'hairbrush', 'id': 524, 'frequency': 'f', 'synset': 'hairbrush.n.01'}, {'name': 'hairnet', 'id': 525, 'frequency': 'c', 'synset': 'hairnet.n.01'}, {'name': 'hairpin', 'id': 526, 'frequency': 'c', 'synset': 'hairpin.n.01'}, {'name': 'halter_top', 'id': 527, 'frequency': 'r', 'synset': 'halter.n.03'}, {'name': 'ham', 'id': 528, 'frequency': 'f', 'synset': 'ham.n.01'}, {'name': 'hamburger', 'id': 529, 'frequency': 'c', 'synset': 'hamburger.n.01'}, {'name': 'hammer', 'id': 530, 'frequency': 'c', 'synset': 'hammer.n.02'}, {'name': 'hammock', 'id': 531, 'frequency': 'c', 'synset': 'hammock.n.02'}, {'name': 'hamper', 'id': 532, 'frequency': 'r', 'synset': 'hamper.n.02'}, {'name': 'hamster', 'id': 533, 'frequency': 'c', 'synset': 'hamster.n.01'}, {'name': 'hair_dryer', 'id': 534, 'frequency': 'f', 'synset': 'hand_blower.n.01'}, {'name': 'hand_glass', 'id': 535, 'frequency': 'r', 'synset': 'hand_glass.n.01'}, {'name': 'hand_towel', 'id': 536, 'frequency': 'f', 'synset': 'hand_towel.n.01'}, {'name': 'handcart', 'id': 537, 'frequency': 'c', 'synset': 'handcart.n.01'}, {'name': 'handcuff', 'id': 538, 'frequency': 'r', 'synset': 'handcuff.n.01'}, {'name': 'handkerchief', 'id': 539, 'frequency': 'c', 'synset': 'handkerchief.n.01'}, {'name': 'handle', 'id': 540, 'frequency': 'f', 'synset': 'handle.n.01'}, {'name': 'handsaw', 'id': 541, 'frequency': 
'r', 'synset': 'handsaw.n.01'}, {'name': 'hardback_book', 'id': 542, 'frequency': 'r', 'synset': 'hardback.n.01'}, {'name': 'harmonium', 'id': 543, 'frequency': 'r', 'synset': 'harmonium.n.01'}, {'name': 'hat', 'id': 544, 'frequency': 'f', 'synset': 'hat.n.01'}, {'name': 'hatbox', 'id': 545, 'frequency': 'r', 'synset': 'hatbox.n.01'}, {'name': 'veil', 'id': 546, 'frequency': 'c', 'synset': 'head_covering.n.01'}, {'name': 'headband', 'id': 547, 'frequency': 'f', 'synset': 'headband.n.01'}, {'name': 'headboard', 'id': 548, 'frequency': 'f', 'synset': 'headboard.n.01'}, {'name': 'headlight', 'id': 549, 'frequency': 'f', 'synset': 'headlight.n.01'}, {'name': 'headscarf', 'id': 550, 'frequency': 'c', 'synset': 'headscarf.n.01'}, {'name': 'headset', 'id': 551, 'frequency': 'r', 'synset': 'headset.n.01'}, {'name': 'headstall_(for_horses)', 'id': 552, 'frequency': 'c', 'synset': 'headstall.n.01'}, {'name': 'heart', 'id': 553, 'frequency': 'c', 'synset': 'heart.n.02'}, {'name': 'heater', 'id': 554, 'frequency': 'c', 'synset': 'heater.n.01'}, {'name': 'helicopter', 'id': 555, 'frequency': 'c', 'synset': 'helicopter.n.01'}, {'name': 'helmet', 'id': 556, 'frequency': 'f', 'synset': 'helmet.n.02'}, {'name': 'heron', 'id': 557, 'frequency': 'r', 'synset': 'heron.n.02'}, {'name': 'highchair', 'id': 558, 'frequency': 'c', 'synset': 'highchair.n.01'}, {'name': 'hinge', 'id': 559, 'frequency': 'f', 'synset': 'hinge.n.01'}, {'name': 'hippopotamus', 'id': 560, 'frequency': 'r', 'synset': 'hippopotamus.n.01'}, {'name': 'hockey_stick', 'id': 561, 'frequency': 'r', 'synset': 'hockey_stick.n.01'}, {'name': 'hog', 'id': 562, 'frequency': 'c', 'synset': 'hog.n.03'}, {'name': 'home_plate_(baseball)', 'id': 563, 'frequency': 'f', 'synset': 'home_plate.n.01'}, {'name': 'honey', 'id': 564, 'frequency': 'c', 'synset': 'honey.n.01'}, {'name': 'fume_hood', 'id': 565, 'frequency': 'f', 'synset': 'hood.n.06'}, {'name': 'hook', 'id': 566, 'frequency': 'f', 'synset': 'hook.n.05'}, {'name': 'hookah', 'id': 567, 'frequency': 'r', 'synset': 'hookah.n.01'}, {'name': 'hornet', 'id': 568, 'frequency': 'r', 'synset': 'hornet.n.01'}, {'name': 'horse', 'id': 569, 'frequency': 'f', 'synset': 'horse.n.01'}, {'name': 'hose', 'id': 570, 'frequency': 'f', 'synset': 'hose.n.03'}, {'name': 'hot-air_balloon', 'id': 571, 'frequency': 'r', 'synset': 'hot-air_balloon.n.01'}, {'name': 'hotplate', 'id': 572, 'frequency': 'r', 'synset': 'hot_plate.n.01'}, {'name': 'hot_sauce', 'id': 573, 'frequency': 'c', 'synset': 'hot_sauce.n.01'}, {'name': 'hourglass', 'id': 574, 'frequency': 'r', 'synset': 'hourglass.n.01'}, {'name': 'houseboat', 'id': 575, 'frequency': 'r', 'synset': 'houseboat.n.01'}, {'name': 'hummingbird', 'id': 576, 'frequency': 'c', 'synset': 'hummingbird.n.01'}, {'name': 'hummus', 'id': 577, 'frequency': 'r', 'synset': 'hummus.n.01'}, {'name': 'polar_bear', 'id': 578, 'frequency': 'f', 'synset': 'ice_bear.n.01'}, {'name': 'icecream', 'id': 579, 'frequency': 'c', 'synset': 'ice_cream.n.01'}, {'name': 'popsicle', 'id': 580, 'frequency': 'r', 'synset': 'ice_lolly.n.01'}, {'name': 'ice_maker', 'id': 581, 'frequency': 'c', 'synset': 'ice_maker.n.01'}, {'name': 'ice_pack', 'id': 582, 'frequency': 'r', 'synset': 'ice_pack.n.01'}, {'name': 'ice_skate', 'id': 583, 'frequency': 'r', 'synset': 'ice_skate.n.01'}, {'name': 'igniter', 'id': 584, 'frequency': 'c', 'synset': 'igniter.n.01'}, {'name': 'inhaler', 'id': 585, 'frequency': 'r', 'synset': 'inhaler.n.01'}, {'name': 'iPod', 'id': 586, 'frequency': 'f', 'synset': 'ipod.n.01'}, {'name': 
'iron_(for_clothing)', 'id': 587, 'frequency': 'c', 'synset': 'iron.n.04'}, {'name': 'ironing_board', 'id': 588, 'frequency': 'c', 'synset': 'ironing_board.n.01'}, {'name': 'jacket', 'id': 589, 'frequency': 'f', 'synset': 'jacket.n.01'}, {'name': 'jam', 'id': 590, 'frequency': 'c', 'synset': 'jam.n.01'}, {'name': 'jar', 'id': 591, 'frequency': 'f', 'synset': 'jar.n.01'}, {'name': 'jean', 'id': 592, 'frequency': 'f', 'synset': 'jean.n.01'}, {'name': 'jeep', 'id': 593, 'frequency': 'c', 'synset': 'jeep.n.01'}, {'name': 'jelly_bean', 'id': 594, 'frequency': 'r', 'synset': 'jelly_bean.n.01'}, {'name': 'jersey', 'id': 595, 'frequency': 'f', 'synset': 'jersey.n.03'}, {'name': 'jet_plane', 'id': 596, 'frequency': 'c', 'synset': 'jet.n.01'}, {'name': 'jewel', 'id': 597, 'frequency': 'r', 'synset': 'jewel.n.01'}, {'name': 'jewelry', 'id': 598, 'frequency': 'c', 'synset': 'jewelry.n.01'}, {'name': 'joystick', 'id': 599, 'frequency': 'r', 'synset': 'joystick.n.02'}, {'name': 'jumpsuit', 'id': 600, 'frequency': 'c', 'synset': 'jump_suit.n.01'}, {'name': 'kayak', 'id': 601, 'frequency': 'c', 'synset': 'kayak.n.01'}, {'name': 'keg', 'id': 602, 'frequency': 'r', 'synset': 'keg.n.02'}, {'name': 'kennel', 'id': 603, 'frequency': 'r', 'synset': 'kennel.n.01'}, {'name': 'kettle', 'id': 604, 'frequency': 'c', 'synset': 'kettle.n.01'}, {'name': 'key', 'id': 605, 'frequency': 'f', 'synset': 'key.n.01'}, {'name': 'keycard', 'id': 606, 'frequency': 'r', 'synset': 'keycard.n.01'}, {'name': 'kilt', 'id': 607, 'frequency': 'c', 'synset': 'kilt.n.01'}, {'name': 'kimono', 'id': 608, 'frequency': 'c', 'synset': 'kimono.n.01'}, {'name': 'kitchen_sink', 'id': 609, 'frequency': 'f', 'synset': 'kitchen_sink.n.01'}, {'name': 'kitchen_table', 'id': 610, 'frequency': 'r', 'synset': 'kitchen_table.n.01'}, {'name': 'kite', 'id': 611, 'frequency': 'f', 'synset': 'kite.n.03'}, {'name': 'kitten', 'id': 612, 'frequency': 'c', 'synset': 'kitten.n.01'}, {'name': 'kiwi_fruit', 'id': 613, 'frequency': 'c', 'synset': 'kiwi.n.03'}, {'name': 'knee_pad', 'id': 614, 'frequency': 'f', 'synset': 'knee_pad.n.01'}, {'name': 'knife', 'id': 615, 'frequency': 'f', 'synset': 'knife.n.01'}, {'name': 'knitting_needle', 'id': 616, 'frequency': 'r', 'synset': 'knitting_needle.n.01'}, {'name': 'knob', 'id': 617, 'frequency': 'f', 'synset': 'knob.n.02'}, {'name': 'knocker_(on_a_door)', 'id': 618, 'frequency': 'r', 'synset': 'knocker.n.05'}, {'name': 'koala', 'id': 619, 'frequency': 'r', 'synset': 'koala.n.01'}, {'name': 'lab_coat', 'id': 620, 'frequency': 'r', 'synset': 'lab_coat.n.01'}, {'name': 'ladder', 'id': 621, 'frequency': 'f', 'synset': 'ladder.n.01'}, {'name': 'ladle', 'id': 622, 'frequency': 'c', 'synset': 'ladle.n.01'}, {'name': 'ladybug', 'id': 623, 'frequency': 'c', 'synset': 'ladybug.n.01'}, {'name': 'lamb_(animal)', 'id': 624, 'frequency': 'f', 'synset': 'lamb.n.01'}, {'name': 'lamb-chop', 'id': 625, 'frequency': 'r', 'synset': 'lamb_chop.n.01'}, {'name': 'lamp', 'id': 626, 'frequency': 'f', 'synset': 'lamp.n.02'}, {'name': 'lamppost', 'id': 627, 'frequency': 'f', 'synset': 'lamppost.n.01'}, {'name': 'lampshade', 'id': 628, 'frequency': 'f', 'synset': 'lampshade.n.01'}, {'name': 'lantern', 'id': 629, 'frequency': 'c', 'synset': 'lantern.n.01'}, {'name': 'lanyard', 'id': 630, 'frequency': 'f', 'synset': 'lanyard.n.02'}, {'name': 'laptop_computer', 'id': 631, 'frequency': 'f', 'synset': 'laptop.n.01'}, {'name': 'lasagna', 'id': 632, 'frequency': 'r', 'synset': 'lasagna.n.01'}, {'name': 'latch', 'id': 633, 'frequency': 'f', 'synset': 
'latch.n.02'}, {'name': 'lawn_mower', 'id': 634, 'frequency': 'r', 'synset': 'lawn_mower.n.01'}, {'name': 'leather', 'id': 635, 'frequency': 'r', 'synset': 'leather.n.01'}, {'name': 'legging_(clothing)', 'id': 636, 'frequency': 'c', 'synset': 'legging.n.01'}, {'name': 'Lego', 'id': 637, 'frequency': 'c', 'synset': 'lego.n.01'}, {'name': 'legume', 'id': 638, 'frequency': 'r', 'synset': 'legume.n.02'}, {'name': 'lemon', 'id': 639, 'frequency': 'f', 'synset': 'lemon.n.01'}, {'name': 'lemonade', 'id': 640, 'frequency': 'r', 'synset': 'lemonade.n.01'}, {'name': 'lettuce', 'id': 641, 'frequency': 'f', 'synset': 'lettuce.n.02'}, {'name': 'license_plate', 'id': 642, 'frequency': 'f', 'synset': 'license_plate.n.01'}, {'name': 'life_buoy', 'id': 643, 'frequency': 'f', 'synset': 'life_buoy.n.01'}, {'name': 'life_jacket', 'id': 644, 'frequency': 'f', 'synset': 'life_jacket.n.01'}, {'name': 'lightbulb', 'id': 645, 'frequency': 'f', 'synset': 'light_bulb.n.01'}, {'name': 'lightning_rod', 'id': 646, 'frequency': 'r', 'synset': 'lightning_rod.n.02'}, {'name': 'lime', 'id': 647, 'frequency': 'f', 'synset': 'lime.n.06'}, {'name': 'limousine', 'id': 648, 'frequency': 'r', 'synset': 'limousine.n.01'}, {'name': 'lion', 'id': 649, 'frequency': 'c', 'synset': 'lion.n.01'}, {'name': 'lip_balm', 'id': 650, 'frequency': 'c', 'synset': 'lip_balm.n.01'}, {'name': 'liquor', 'id': 651, 'frequency': 'r', 'synset': 'liquor.n.01'}, {'name': 'lizard', 'id': 652, 'frequency': 'c', 'synset': 'lizard.n.01'}, {'name': 'log', 'id': 653, 'frequency': 'f', 'synset': 'log.n.01'}, {'name': 'lollipop', 'id': 654, 'frequency': 'c', 'synset': 'lollipop.n.02'}, {'name': 'speaker_(stero_equipment)', 'id': 655, 'frequency': 'f', 'synset': 'loudspeaker.n.01'}, {'name': 'loveseat', 'id': 656, 'frequency': 'c', 'synset': 'love_seat.n.01'}, {'name': 'machine_gun', 'id': 657, 'frequency': 'r', 'synset': 'machine_gun.n.01'}, {'name': 'magazine', 'id': 658, 'frequency': 'f', 'synset': 'magazine.n.02'}, {'name': 'magnet', 'id': 659, 'frequency': 'f', 'synset': 'magnet.n.01'}, {'name': 'mail_slot', 'id': 660, 'frequency': 'c', 'synset': 'mail_slot.n.01'}, {'name': 'mailbox_(at_home)', 'id': 661, 'frequency': 'f', 'synset': 'mailbox.n.01'}, {'name': 'mallard', 'id': 662, 'frequency': 'r', 'synset': 'mallard.n.01'}, {'name': 'mallet', 'id': 663, 'frequency': 'r', 'synset': 'mallet.n.01'}, {'name': 'mammoth', 'id': 664, 'frequency': 'r', 'synset': 'mammoth.n.01'}, {'name': 'manatee', 'id': 665, 'frequency': 'r', 'synset': 'manatee.n.01'}, {'name': 'mandarin_orange', 'id': 666, 'frequency': 'c', 'synset': 'mandarin.n.05'}, {'name': 'manger', 'id': 667, 'frequency': 'c', 'synset': 'manger.n.01'}, {'name': 'manhole', 'id': 668, 'frequency': 'f', 'synset': 'manhole.n.01'}, {'name': 'map', 'id': 669, 'frequency': 'f', 'synset': 'map.n.01'}, {'name': 'marker', 'id': 670, 'frequency': 'f', 'synset': 'marker.n.03'}, {'name': 'martini', 'id': 671, 'frequency': 'r', 'synset': 'martini.n.01'}, {'name': 'mascot', 'id': 672, 'frequency': 'r', 'synset': 'mascot.n.01'}, {'name': 'mashed_potato', 'id': 673, 'frequency': 'c', 'synset': 'mashed_potato.n.01'}, {'name': 'masher', 'id': 674, 'frequency': 'r', 'synset': 'masher.n.02'}, {'name': 'mask', 'id': 675, 'frequency': 'f', 'synset': 'mask.n.04'}, {'name': 'mast', 'id': 676, 'frequency': 'f', 'synset': 'mast.n.01'}, {'name': 'mat_(gym_equipment)', 'id': 677, 'frequency': 'c', 'synset': 'mat.n.03'}, {'name': 'matchbox', 'id': 678, 'frequency': 'r', 'synset': 'matchbox.n.01'}, {'name': 'mattress', 'id': 679, 
'frequency': 'f', 'synset': 'mattress.n.01'}, {'name': 'measuring_cup', 'id': 680, 'frequency': 'c', 'synset': 'measuring_cup.n.01'}, {'name': 'measuring_stick', 'id': 681, 'frequency': 'c', 'synset': 'measuring_stick.n.01'}, {'name': 'meatball', 'id': 682, 'frequency': 'c', 'synset': 'meatball.n.01'}, {'name': 'medicine', 'id': 683, 'frequency': 'c', 'synset': 'medicine.n.02'}, {'name': 'melon', 'id': 684, 'frequency': 'c', 'synset': 'melon.n.01'}, {'name': 'microphone', 'id': 685, 'frequency': 'f', 'synset': 'microphone.n.01'}, {'name': 'microscope', 'id': 686, 'frequency': 'r', 'synset': 'microscope.n.01'}, {'name': 'microwave_oven', 'id': 687, 'frequency': 'f', 'synset': 'microwave.n.02'}, {'name': 'milestone', 'id': 688, 'frequency': 'r', 'synset': 'milestone.n.01'}, {'name': 'milk', 'id': 689, 'frequency': 'f', 'synset': 'milk.n.01'}, {'name': 'milk_can', 'id': 690, 'frequency': 'r', 'synset': 'milk_can.n.01'}, {'name': 'milkshake', 'id': 691, 'frequency': 'r', 'synset': 'milkshake.n.01'}, {'name': 'minivan', 'id': 692, 'frequency': 'f', 'synset': 'minivan.n.01'}, {'name': 'mint_candy', 'id': 693, 'frequency': 'r', 'synset': 'mint.n.05'}, {'name': 'mirror', 'id': 694, 'frequency': 'f', 'synset': 'mirror.n.01'}, {'name': 'mitten', 'id': 695, 'frequency': 'c', 'synset': 'mitten.n.01'}, {'name': 'mixer_(kitchen_tool)', 'id': 696, 'frequency': 'c', 'synset': 'mixer.n.04'}, {'name': 'money', 'id': 697, 'frequency': 'c', 'synset': 'money.n.03'}, {'name': 'monitor_(computer_equipment) computer_monitor', 'id': 698, 'frequency': 'f', 'synset': 'monitor.n.04'}, {'name': 'monkey', 'id': 699, 'frequency': 'c', 'synset': 'monkey.n.01'}, {'name': 'motor', 'id': 700, 'frequency': 'f', 'synset': 'motor.n.01'}, {'name': 'motor_scooter', 'id': 701, 'frequency': 'f', 'synset': 'motor_scooter.n.01'}, {'name': 'motor_vehicle', 'id': 702, 'frequency': 'r', 'synset': 'motor_vehicle.n.01'}, {'name': 'motorcycle', 'id': 703, 'frequency': 'f', 'synset': 'motorcycle.n.01'}, {'name': 'mound_(baseball)', 'id': 704, 'frequency': 'f', 'synset': 'mound.n.01'}, {'name': 'mouse_(computer_equipment)', 'id': 705, 'frequency': 'f', 'synset': 'mouse.n.04'}, {'name': 'mousepad', 'id': 706, 'frequency': 'f', 'synset': 'mousepad.n.01'}, {'name': 'muffin', 'id': 707, 'frequency': 'c', 'synset': 'muffin.n.01'}, {'name': 'mug', 'id': 708, 'frequency': 'f', 'synset': 'mug.n.04'}, {'name': 'mushroom', 'id': 709, 'frequency': 'f', 'synset': 'mushroom.n.02'}, {'name': 'music_stool', 'id': 710, 'frequency': 'r', 'synset': 'music_stool.n.01'}, {'name': 'musical_instrument', 'id': 711, 'frequency': 'c', 'synset': 'musical_instrument.n.01'}, {'name': 'nailfile', 'id': 712, 'frequency': 'r', 'synset': 'nailfile.n.01'}, {'name': 'napkin', 'id': 713, 'frequency': 'f', 'synset': 'napkin.n.01'}, {'name': 'neckerchief', 'id': 714, 'frequency': 'r', 'synset': 'neckerchief.n.01'}, {'name': 'necklace', 'id': 715, 'frequency': 'f', 'synset': 'necklace.n.01'}, {'name': 'necktie', 'id': 716, 'frequency': 'f', 'synset': 'necktie.n.01'}, {'name': 'needle', 'id': 717, 'frequency': 'c', 'synset': 'needle.n.03'}, {'name': 'nest', 'id': 718, 'frequency': 'c', 'synset': 'nest.n.01'}, {'name': 'newspaper', 'id': 719, 'frequency': 'f', 'synset': 'newspaper.n.01'}, {'name': 'newsstand', 'id': 720, 'frequency': 'c', 'synset': 'newsstand.n.01'}, {'name': 'nightshirt', 'id': 721, 'frequency': 'c', 'synset': 'nightwear.n.01'}, {'name': 'nosebag_(for_animals)', 'id': 722, 'frequency': 'r', 'synset': 'nosebag.n.01'}, {'name': 'noseband_(for_animals)', 'id': 723, 
'frequency': 'c', 'synset': 'noseband.n.01'}, {'name': 'notebook', 'id': 724, 'frequency': 'f', 'synset': 'notebook.n.01'}, {'name': 'notepad', 'id': 725, 'frequency': 'c', 'synset': 'notepad.n.01'}, {'name': 'nut', 'id': 726, 'frequency': 'f', 'synset': 'nut.n.03'}, {'name': 'nutcracker', 'id': 727, 'frequency': 'r', 'synset': 'nutcracker.n.01'}, {'name': 'oar', 'id': 728, 'frequency': 'f', 'synset': 'oar.n.01'}, {'name': 'octopus_(food)', 'id': 729, 'frequency': 'r', 'synset': 'octopus.n.01'}, {'name': 'octopus_(animal)', 'id': 730, 'frequency': 'r', 'synset': 'octopus.n.02'}, {'name': 'oil_lamp', 'id': 731, 'frequency': 'c', 'synset': 'oil_lamp.n.01'}, {'name': 'olive_oil', 'id': 732, 'frequency': 'c', 'synset': 'olive_oil.n.01'}, {'name': 'omelet', 'id': 733, 'frequency': 'r', 'synset': 'omelet.n.01'}, {'name': 'onion', 'id': 734, 'frequency': 'f', 'synset': 'onion.n.01'}, {'name': 'orange_(fruit)', 'id': 735, 'frequency': 'f', 'synset': 'orange.n.01'}, {'name': 'orange_juice', 'id': 736, 'frequency': 'c', 'synset': 'orange_juice.n.01'}, {'name': 'ostrich', 'id': 737, 'frequency': 'c', 'synset': 'ostrich.n.02'}, {'name': 'ottoman', 'id': 738, 'frequency': 'f', 'synset': 'ottoman.n.03'}, {'name': 'oven', 'id': 739, 'frequency': 'f', 'synset': 'oven.n.01'}, {'name': 'overalls_(clothing)', 'id': 740, 'frequency': 'c', 'synset': 'overall.n.01'}, {'name': 'owl', 'id': 741, 'frequency': 'c', 'synset': 'owl.n.01'}, {'name': 'packet', 'id': 742, 'frequency': 'c', 'synset': 'packet.n.03'}, {'name': 'inkpad', 'id': 743, 'frequency': 'r', 'synset': 'pad.n.03'}, {'name': 'pad', 'id': 744, 'frequency': 'c', 'synset': 'pad.n.04'}, {'name': 'paddle', 'id': 745, 'frequency': 'f', 'synset': 'paddle.n.04'}, {'name': 'padlock', 'id': 746, 'frequency': 'c', 'synset': 'padlock.n.01'}, {'name': 'paintbrush', 'id': 747, 'frequency': 'c', 'synset': 'paintbrush.n.01'}, {'name': 'painting', 'id': 748, 'frequency': 'f', 'synset': 'painting.n.01'}, {'name': 'pajamas', 'id': 749, 'frequency': 'f', 'synset': 'pajama.n.02'}, {'name': 'palette', 'id': 750, 'frequency': 'c', 'synset': 'palette.n.02'}, {'name': 'pan_(for_cooking)', 'id': 751, 'frequency': 'f', 'synset': 'pan.n.01'}, {'name': 'pan_(metal_container)', 'id': 752, 'frequency': 'r', 'synset': 'pan.n.03'}, {'name': 'pancake', 'id': 753, 'frequency': 'c', 'synset': 'pancake.n.01'}, {'name': 'pantyhose', 'id': 754, 'frequency': 'r', 'synset': 'pantyhose.n.01'}, {'name': 'papaya', 'id': 755, 'frequency': 'r', 'synset': 'papaya.n.02'}, {'name': 'paper_plate', 'id': 756, 'frequency': 'f', 'synset': 'paper_plate.n.01'}, {'name': 'paper_towel', 'id': 757, 'frequency': 'f', 'synset': 'paper_towel.n.01'}, {'name': 'paperback_book', 'id': 758, 'frequency': 'r', 'synset': 'paperback_book.n.01'}, {'name': 'paperweight', 'id': 759, 'frequency': 'r', 'synset': 'paperweight.n.01'}, {'name': 'parachute', 'id': 760, 'frequency': 'c', 'synset': 'parachute.n.01'}, {'name': 'parakeet', 'id': 761, 'frequency': 'c', 'synset': 'parakeet.n.01'}, {'name': 'parasail_(sports)', 'id': 762, 'frequency': 'c', 'synset': 'parasail.n.01'}, {'name': 'parasol', 'id': 763, 'frequency': 'c', 'synset': 'parasol.n.01'}, {'name': 'parchment', 'id': 764, 'frequency': 'r', 'synset': 'parchment.n.01'}, {'name': 'parka', 'id': 765, 'frequency': 'c', 'synset': 'parka.n.01'}, {'name': 'parking_meter', 'id': 766, 'frequency': 'f', 'synset': 'parking_meter.n.01'}, {'name': 'parrot', 'id': 767, 'frequency': 'c', 'synset': 'parrot.n.01'}, {'name': 'passenger_car_(part_of_a_train)', 'id': 768, 'frequency': 
'c', 'synset': 'passenger_car.n.01'}, {'name': 'passenger_ship', 'id': 769, 'frequency': 'r', 'synset': 'passenger_ship.n.01'}, {'name': 'passport', 'id': 770, 'frequency': 'c', 'synset': 'passport.n.02'}, {'name': 'pastry', 'id': 771, 'frequency': 'f', 'synset': 'pastry.n.02'}, {'name': 'patty_(food)', 'id': 772, 'frequency': 'r', 'synset': 'patty.n.01'}, {'name': 'pea_(food)', 'id': 773, 'frequency': 'c', 'synset': 'pea.n.01'}, {'name': 'peach', 'id': 774, 'frequency': 'c', 'synset': 'peach.n.03'}, {'name': 'peanut_butter', 'id': 775, 'frequency': 'c', 'synset': 'peanut_butter.n.01'}, {'name': 'pear', 'id': 776, 'frequency': 'f', 'synset': 'pear.n.01'}, {'name': 'peeler_(tool_for_fruit_and_vegetables)', 'id': 777, 'frequency': 'c', 'synset': 'peeler.n.03'}, {'name': 'wooden_leg', 'id': 778, 'frequency': 'r', 'synset': 'peg.n.04'}, {'name': 'pegboard', 'id': 779, 'frequency': 'r', 'synset': 'pegboard.n.01'}, {'name': 'pelican', 'id': 780, 'frequency': 'c', 'synset': 'pelican.n.01'}, {'name': 'pen', 'id': 781, 'frequency': 'f', 'synset': 'pen.n.01'}, {'name': 'pencil', 'id': 782, 'frequency': 'f', 'synset': 'pencil.n.01'}, {'name': 'pencil_box', 'id': 783, 'frequency': 'r', 'synset': 'pencil_box.n.01'}, {'name': 'pencil_sharpener', 'id': 784, 'frequency': 'r', 'synset': 'pencil_sharpener.n.01'}, {'name': 'pendulum', 'id': 785, 'frequency': 'r', 'synset': 'pendulum.n.01'}, {'name': 'penguin', 'id': 786, 'frequency': 'c', 'synset': 'penguin.n.01'}, {'name': 'pennant', 'id': 787, 'frequency': 'r', 'synset': 'pennant.n.02'}, {'name': 'penny_(coin)', 'id': 788, 'frequency': 'r', 'synset': 'penny.n.02'}, {'name': 'pepper', 'id': 789, 'frequency': 'f', 'synset': 'pepper.n.03'}, {'name': 'pepper_mill', 'id': 790, 'frequency': 'c', 'synset': 'pepper_mill.n.01'}, {'name': 'perfume', 'id': 791, 'frequency': 'c', 'synset': 'perfume.n.02'}, {'name': 'persimmon', 'id': 792, 'frequency': 'r', 'synset': 'persimmon.n.02'}, {'name': 'person', 'id': 793, 'frequency': 'f', 'synset': 'person.n.01'}, {'name': 'pet', 'id': 794, 'frequency': 'c', 'synset': 'pet.n.01'}, {'name': 'pew_(church_bench)', 'id': 795, 'frequency': 'c', 'synset': 'pew.n.01'}, {'name': 'phonebook', 'id': 796, 'frequency': 'r', 'synset': 'phonebook.n.01'}, {'name': 'phonograph_record', 'id': 797, 'frequency': 'c', 'synset': 'phonograph_record.n.01'}, {'name': 'piano', 'id': 798, 'frequency': 'f', 'synset': 'piano.n.01'}, {'name': 'pickle', 'id': 799, 'frequency': 'f', 'synset': 'pickle.n.01'}, {'name': 'pickup_truck', 'id': 800, 'frequency': 'f', 'synset': 'pickup.n.01'}, {'name': 'pie', 'id': 801, 'frequency': 'c', 'synset': 'pie.n.01'}, {'name': 'pigeon', 'id': 802, 'frequency': 'c', 'synset': 'pigeon.n.01'}, {'name': 'piggy_bank', 'id': 803, 'frequency': 'r', 'synset': 'piggy_bank.n.01'}, {'name': 'pillow', 'id': 804, 'frequency': 'f', 'synset': 'pillow.n.01'}, {'name': 'pin_(non_jewelry)', 'id': 805, 'frequency': 'r', 'synset': 'pin.n.09'}, {'name': 'pineapple', 'id': 806, 'frequency': 'f', 'synset': 'pineapple.n.02'}, {'name': 'pinecone', 'id': 807, 'frequency': 'c', 'synset': 'pinecone.n.01'}, {'name': 'ping-pong_ball', 'id': 808, 'frequency': 'r', 'synset': 'ping-pong_ball.n.01'}, {'name': 'pinwheel', 'id': 809, 'frequency': 'r', 'synset': 'pinwheel.n.03'}, {'name': 'tobacco_pipe', 'id': 810, 'frequency': 'r', 'synset': 'pipe.n.01'}, {'name': 'pipe', 'id': 811, 'frequency': 'f', 'synset': 'pipe.n.02'}, {'name': 'pistol', 'id': 812, 'frequency': 'r', 'synset': 'pistol.n.01'}, {'name': 'pita_(bread)', 'id': 813, 'frequency': 'c', 
'synset': 'pita.n.01'}, {'name': 'pitcher_(vessel_for_liquid)', 'id': 814, 'frequency': 'f', 'synset': 'pitcher.n.02'}, {'name': 'pitchfork', 'id': 815, 'frequency': 'r', 'synset': 'pitchfork.n.01'}, {'name': 'pizza', 'id': 816, 'frequency': 'f', 'synset': 'pizza.n.01'}, {'name': 'place_mat', 'id': 817, 'frequency': 'f', 'synset': 'place_mat.n.01'}, {'name': 'plate', 'id': 818, 'frequency': 'f', 'synset': 'plate.n.04'}, {'name': 'platter', 'id': 819, 'frequency': 'c', 'synset': 'platter.n.01'}, {'name': 'playpen', 'id': 820, 'frequency': 'r', 'synset': 'playpen.n.01'}, {'name': 'pliers', 'id': 821, 'frequency': 'c', 'synset': 'pliers.n.01'}, {'name': 'plow_(farm_equipment)', 'id': 822, 'frequency': 'r', 'synset': 'plow.n.01'}, {'name': 'plume', 'id': 823, 'frequency': 'r', 'synset': 'plume.n.02'}, {'name': 'pocket_watch', 'id': 824, 'frequency': 'r', 'synset': 'pocket_watch.n.01'}, {'name': 'pocketknife', 'id': 825, 'frequency': 'c', 'synset': 'pocketknife.n.01'}, {'name': 'poker_(fire_stirring_tool)', 'id': 826, 'frequency': 'c', 'synset': 'poker.n.01'}, {'name': 'pole', 'id': 827, 'frequency': 'f', 'synset': 'pole.n.01'}, {'name': 'polo_shirt', 'id': 828, 'frequency': 'f', 'synset': 'polo_shirt.n.01'}, {'name': 'poncho', 'id': 829, 'frequency': 'r', 'synset': 'poncho.n.01'}, {'name': 'pony', 'id': 830, 'frequency': 'c', 'synset': 'pony.n.05'}, {'name': 'pool_table', 'id': 831, 'frequency': 'r', 'synset': 'pool_table.n.01'}, {'name': 'pop_(soda)', 'id': 832, 'frequency': 'f', 'synset': 'pop.n.02'}, {'name': 'postbox_(public)', 'id': 833, 'frequency': 'c', 'synset': 'postbox.n.01'}, {'name': 'postcard', 'id': 834, 'frequency': 'c', 'synset': 'postcard.n.01'}, {'name': 'poster', 'id': 835, 'frequency': 'f', 'synset': 'poster.n.01'}, {'name': 'pot', 'id': 836, 'frequency': 'f', 'synset': 'pot.n.01'}, {'name': 'flowerpot', 'id': 837, 'frequency': 'f', 'synset': 'pot.n.04'}, {'name': 'potato', 'id': 838, 'frequency': 'f', 'synset': 'potato.n.01'}, {'name': 'potholder', 'id': 839, 'frequency': 'c', 'synset': 'potholder.n.01'}, {'name': 'pottery', 'id': 840, 'frequency': 'c', 'synset': 'pottery.n.01'}, {'name': 'pouch', 'id': 841, 'frequency': 'c', 'synset': 'pouch.n.01'}, {'name': 'power_shovel', 'id': 842, 'frequency': 'c', 'synset': 'power_shovel.n.01'}, {'name': 'prawn', 'id': 843, 'frequency': 'c', 'synset': 'prawn.n.01'}, {'name': 'pretzel', 'id': 844, 'frequency': 'c', 'synset': 'pretzel.n.01'}, {'name': 'printer', 'id': 845, 'frequency': 'f', 'synset': 'printer.n.03'}, {'name': 'projectile_(weapon)', 'id': 846, 'frequency': 'c', 'synset': 'projectile.n.01'}, {'name': 'projector', 'id': 847, 'frequency': 'c', 'synset': 'projector.n.02'}, {'name': 'propeller', 'id': 848, 'frequency': 'f', 'synset': 'propeller.n.01'}, {'name': 'prune', 'id': 849, 'frequency': 'r', 'synset': 'prune.n.01'}, {'name': 'pudding', 'id': 850, 'frequency': 'r', 'synset': 'pudding.n.01'}, {'name': 'puffer_(fish)', 'id': 851, 'frequency': 'r', 'synset': 'puffer.n.02'}, {'name': 'puffin', 'id': 852, 'frequency': 'r', 'synset': 'puffin.n.01'}, {'name': 'pug-dog', 'id': 853, 'frequency': 'r', 'synset': 'pug.n.01'}, {'name': 'pumpkin', 'id': 854, 'frequency': 'c', 'synset': 'pumpkin.n.02'}, {'name': 'puncher', 'id': 855, 'frequency': 'r', 'synset': 'punch.n.03'}, {'name': 'puppet', 'id': 856, 'frequency': 'r', 'synset': 'puppet.n.01'}, {'name': 'puppy', 'id': 857, 'frequency': 'c', 'synset': 'puppy.n.01'}, {'name': 'quesadilla', 'id': 858, 'frequency': 'r', 'synset': 'quesadilla.n.01'}, {'name': 'quiche', 'id': 859, 
'frequency': 'r', 'synset': 'quiche.n.02'}, {'name': 'quilt', 'id': 860, 'frequency': 'f', 'synset': 'quilt.n.01'}, {'name': 'rabbit', 'id': 861, 'frequency': 'c', 'synset': 'rabbit.n.01'}, {'name': 'race_car', 'id': 862, 'frequency': 'r', 'synset': 'racer.n.02'}, {'name': 'racket', 'id': 863, 'frequency': 'c', 'synset': 'racket.n.04'}, {'name': 'radar', 'id': 864, 'frequency': 'r', 'synset': 'radar.n.01'}, {'name': 'radiator', 'id': 865, 'frequency': 'f', 'synset': 'radiator.n.03'}, {'name': 'radio_receiver', 'id': 866, 'frequency': 'c', 'synset': 'radio_receiver.n.01'}, {'name': 'radish', 'id': 867, 'frequency': 'c', 'synset': 'radish.n.03'}, {'name': 'raft', 'id': 868, 'frequency': 'c', 'synset': 'raft.n.01'}, {'name': 'rag_doll', 'id': 869, 'frequency': 'r', 'synset': 'rag_doll.n.01'}, {'name': 'raincoat', 'id': 870, 'frequency': 'c', 'synset': 'raincoat.n.01'}, {'name': 'ram_(animal)', 'id': 871, 'frequency': 'c', 'synset': 'ram.n.05'}, {'name': 'raspberry', 'id': 872, 'frequency': 'c', 'synset': 'raspberry.n.02'}, {'name': 'rat', 'id': 873, 'frequency': 'r', 'synset': 'rat.n.01'}, {'name': 'razorblade', 'id': 874, 'frequency': 'c', 'synset': 'razorblade.n.01'}, {'name': 'reamer_(juicer)', 'id': 875, 'frequency': 'c', 'synset': 'reamer.n.01'}, {'name': 'rearview_mirror', 'id': 876, 'frequency': 'f', 'synset': 'rearview_mirror.n.01'}, {'name': 'receipt', 'id': 877, 'frequency': 'c', 'synset': 'receipt.n.02'}, {'name': 'recliner', 'id': 878, 'frequency': 'c', 'synset': 'recliner.n.01'}, {'name': 'record_player', 'id': 879, 'frequency': 'c', 'synset': 'record_player.n.01'}, {'name': 'reflector', 'id': 880, 'frequency': 'f', 'synset': 'reflector.n.01'}, {'name': 'remote_control', 'id': 881, 'frequency': 'f', 'synset': 'remote_control.n.01'}, {'name': 'rhinoceros', 'id': 882, 'frequency': 'c', 'synset': 'rhinoceros.n.01'}, {'name': 'rib_(food)', 'id': 883, 'frequency': 'r', 'synset': 'rib.n.03'}, {'name': 'rifle', 'id': 884, 'frequency': 'c', 'synset': 'rifle.n.01'}, {'name': 'ring', 'id': 885, 'frequency': 'f', 'synset': 'ring.n.08'}, {'name': 'river_boat', 'id': 886, 'frequency': 'r', 'synset': 'river_boat.n.01'}, {'name': 'road_map', 'id': 887, 'frequency': 'r', 'synset': 'road_map.n.02'}, {'name': 'robe', 'id': 888, 'frequency': 'c', 'synset': 'robe.n.01'}, {'name': 'rocking_chair', 'id': 889, 'frequency': 'c', 'synset': 'rocking_chair.n.01'}, {'name': 'rodent', 'id': 890, 'frequency': 'r', 'synset': 'rodent.n.01'}, {'name': 'roller_skate', 'id': 891, 'frequency': 'r', 'synset': 'roller_skate.n.01'}, {'name': 'Rollerblade', 'id': 892, 'frequency': 'r', 'synset': 'rollerblade.n.01'}, {'name': 'rolling_pin', 'id': 893, 'frequency': 'c', 'synset': 'rolling_pin.n.01'}, {'name': 'root_beer', 'id': 894, 'frequency': 'r', 'synset': 'root_beer.n.01'}, {'name': 'router_(computer_equipment)', 'id': 895, 'frequency': 'c', 'synset': 'router.n.02'}, {'name': 'rubber_band', 'id': 896, 'frequency': 'f', 'synset': 'rubber_band.n.01'}, {'name': 'runner_(carpet)', 'id': 897, 'frequency': 'c', 'synset': 'runner.n.08'}, {'name': 'plastic_bag', 'id': 898, 'frequency': 'f', 'synset': 'sack.n.01'}, {'name': 'saddle_(on_an_animal)', 'id': 899, 'frequency': 'f', 'synset': 'saddle.n.01'}, {'name': 'saddle_blanket', 'id': 900, 'frequency': 'f', 'synset': 'saddle_blanket.n.01'}, {'name': 'saddlebag', 'id': 901, 'frequency': 'c', 'synset': 'saddlebag.n.01'}, {'name': 'safety_pin', 'id': 902, 'frequency': 'r', 'synset': 'safety_pin.n.01'}, {'name': 'sail', 'id': 903, 'frequency': 'f', 'synset': 'sail.n.01'}, 
{'name': 'salad', 'id': 904, 'frequency': 'f', 'synset': 'salad.n.01'}, {'name': 'salad_plate', 'id': 905, 'frequency': 'r', 'synset': 'salad_plate.n.01'}, {'name': 'salami', 'id': 906, 'frequency': 'c', 'synset': 'salami.n.01'}, {'name': 'salmon_(fish)', 'id': 907, 'frequency': 'c', 'synset': 'salmon.n.01'}, {'name': 'salmon_(food)', 'id': 908, 'frequency': 'r', 'synset': 'salmon.n.03'}, {'name': 'salsa', 'id': 909, 'frequency': 'c', 'synset': 'salsa.n.01'}, {'name': 'saltshaker', 'id': 910, 'frequency': 'f', 'synset': 'saltshaker.n.01'}, {'name': 'sandal_(type_of_shoe)', 'id': 911, 'frequency': 'f', 'synset': 'sandal.n.01'}, {'name': 'sandwich', 'id': 912, 'frequency': 'f', 'synset': 'sandwich.n.01'}, {'name': 'satchel', 'id': 913, 'frequency': 'r', 'synset': 'satchel.n.01'}, {'name': 'saucepan', 'id': 914, 'frequency': 'r', 'synset': 'saucepan.n.01'}, {'name': 'saucer', 'id': 915, 'frequency': 'f', 'synset': 'saucer.n.02'}, {'name': 'sausage', 'id': 916, 'frequency': 'f', 'synset': 'sausage.n.01'}, {'name': 'sawhorse', 'id': 917, 'frequency': 'r', 'synset': 'sawhorse.n.01'}, {'name': 'saxophone', 'id': 918, 'frequency': 'r', 'synset': 'sax.n.02'}, {'name': 'scale_(measuring_instrument)', 'id': 919, 'frequency': 'f', 'synset': 'scale.n.07'}, {'name': 'scarecrow', 'id': 920, 'frequency': 'r', 'synset': 'scarecrow.n.01'}, {'name': 'scarf', 'id': 921, 'frequency': 'f', 'synset': 'scarf.n.01'}, {'name': 'school_bus', 'id': 922, 'frequency': 'c', 'synset': 'school_bus.n.01'}, {'name': 'scissors', 'id': 923, 'frequency': 'f', 'synset': 'scissors.n.01'}, {'name': 'scoreboard', 'id': 924, 'frequency': 'f', 'synset': 'scoreboard.n.01'}, {'name': 'scraper', 'id': 925, 'frequency': 'r', 'synset': 'scraper.n.01'}, {'name': 'screwdriver', 'id': 926, 'frequency': 'c', 'synset': 'screwdriver.n.01'}, {'name': 'scrubbing_brush', 'id': 927, 'frequency': 'f', 'synset': 'scrub_brush.n.01'}, {'name': 'sculpture', 'id': 928, 'frequency': 'c', 'synset': 'sculpture.n.01'}, {'name': 'seabird', 'id': 929, 'frequency': 'c', 'synset': 'seabird.n.01'}, {'name': 'seahorse', 'id': 930, 'frequency': 'c', 'synset': 'seahorse.n.02'}, {'name': 'seaplane', 'id': 931, 'frequency': 'r', 'synset': 'seaplane.n.01'}, {'name': 'seashell', 'id': 932, 'frequency': 'c', 'synset': 'seashell.n.01'}, {'name': 'sewing_machine', 'id': 933, 'frequency': 'c', 'synset': 'sewing_machine.n.01'}, {'name': 'shaker', 'id': 934, 'frequency': 'c', 'synset': 'shaker.n.03'}, {'name': 'shampoo', 'id': 935, 'frequency': 'c', 'synset': 'shampoo.n.01'}, {'name': 'shark', 'id': 936, 'frequency': 'c', 'synset': 'shark.n.01'}, {'name': 'sharpener', 'id': 937, 'frequency': 'r', 'synset': 'sharpener.n.01'}, {'name': 'Sharpie', 'id': 938, 'frequency': 'r', 'synset': 'sharpie.n.03'}, {'name': 'shaver_(electric)', 'id': 939, 'frequency': 'r', 'synset': 'shaver.n.03'}, {'name': 'shaving_cream', 'id': 940, 'frequency': 'c', 'synset': 'shaving_cream.n.01'}, {'name': 'shawl', 'id': 941, 'frequency': 'r', 'synset': 'shawl.n.01'}, {'name': 'shears', 'id': 942, 'frequency': 'r', 'synset': 'shears.n.01'}, {'name': 'sheep', 'id': 943, 'frequency': 'f', 'synset': 'sheep.n.01'}, {'name': 'shepherd_dog', 'id': 944, 'frequency': 'r', 'synset': 'shepherd_dog.n.01'}, {'name': 'sherbert', 'id': 945, 'frequency': 'r', 'synset': 'sherbert.n.01'}, {'name': 'shield', 'id': 946, 'frequency': 'c', 'synset': 'shield.n.02'}, {'name': 'shirt', 'id': 947, 'frequency': 'f', 'synset': 'shirt.n.01'}, {'name': 'shoe', 'id': 948, 'frequency': 'f', 'synset': 'shoe.n.01'}, {'name': 
'shopping_bag', 'id': 949, 'frequency': 'f', 'synset': 'shopping_bag.n.01'}, {'name': 'shopping_cart', 'id': 950, 'frequency': 'c', 'synset': 'shopping_cart.n.01'}, {'name': 'short_pants', 'id': 951, 'frequency': 'f', 'synset': 'short_pants.n.01'}, {'name': 'shot_glass', 'id': 952, 'frequency': 'r', 'synset': 'shot_glass.n.01'}, {'name': 'shoulder_bag', 'id': 953, 'frequency': 'f', 'synset': 'shoulder_bag.n.01'}, {'name': 'shovel', 'id': 954, 'frequency': 'c', 'synset': 'shovel.n.01'}, {'name': 'shower_head', 'id': 955, 'frequency': 'f', 'synset': 'shower.n.01'}, {'name': 'shower_cap', 'id': 956, 'frequency': 'r', 'synset': 'shower_cap.n.01'}, {'name': 'shower_curtain', 'id': 957, 'frequency': 'f', 'synset': 'shower_curtain.n.01'}, {'name': 'shredder_(for_paper)', 'id': 958, 'frequency': 'r', 'synset': 'shredder.n.01'}, {'name': 'signboard', 'id': 959, 'frequency': 'f', 'synset': 'signboard.n.01'}, {'name': 'silo', 'id': 960, 'frequency': 'c', 'synset': 'silo.n.01'}, {'name': 'sink', 'id': 961, 'frequency': 'f', 'synset': 'sink.n.01'}, {'name': 'skateboard', 'id': 962, 'frequency': 'f', 'synset': 'skateboard.n.01'}, {'name': 'skewer', 'id': 963, 'frequency': 'c', 'synset': 'skewer.n.01'}, {'name': 'ski', 'id': 964, 'frequency': 'f', 'synset': 'ski.n.01'}, {'name': 'ski_boot', 'id': 965, 'frequency': 'f', 'synset': 'ski_boot.n.01'}, {'name': 'ski_parka', 'id': 966, 'frequency': 'f', 'synset': 'ski_parka.n.01'}, {'name': 'ski_pole', 'id': 967, 'frequency': 'f', 'synset': 'ski_pole.n.01'}, {'name': 'skirt', 'id': 968, 'frequency': 'f', 'synset': 'skirt.n.02'}, {'name': 'skullcap', 'id': 969, 'frequency': 'r', 'synset': 'skullcap.n.01'}, {'name': 'sled', 'id': 970, 'frequency': 'c', 'synset': 'sled.n.01'}, {'name': 'sleeping_bag', 'id': 971, 'frequency': 'c', 'synset': 'sleeping_bag.n.01'}, {'name': 'sling_(bandage)', 'id': 972, 'frequency': 'r', 'synset': 'sling.n.05'}, {'name': 'slipper_(footwear)', 'id': 973, 'frequency': 'c', 'synset': 'slipper.n.01'}, {'name': 'smoothie', 'id': 974, 'frequency': 'r', 'synset': 'smoothie.n.02'}, {'name': 'snake', 'id': 975, 'frequency': 'r', 'synset': 'snake.n.01'}, {'name': 'snowboard', 'id': 976, 'frequency': 'f', 'synset': 'snowboard.n.01'}, {'name': 'snowman', 'id': 977, 'frequency': 'c', 'synset': 'snowman.n.01'}, {'name': 'snowmobile', 'id': 978, 'frequency': 'c', 'synset': 'snowmobile.n.01'}, {'name': 'soap', 'id': 979, 'frequency': 'f', 'synset': 'soap.n.01'}, {'name': 'soccer_ball', 'id': 980, 'frequency': 'f', 'synset': 'soccer_ball.n.01'}, {'name': 'sock', 'id': 981, 'frequency': 'f', 'synset': 'sock.n.01'}, {'name': 'sofa', 'id': 982, 'frequency': 'f', 'synset': 'sofa.n.01'}, {'name': 'softball', 'id': 983, 'frequency': 'r', 'synset': 'softball.n.01'}, {'name': 'solar_array', 'id': 984, 'frequency': 'c', 'synset': 'solar_array.n.01'}, {'name': 'sombrero', 'id': 985, 'frequency': 'r', 'synset': 'sombrero.n.02'}, {'name': 'soup', 'id': 986, 'frequency': 'f', 'synset': 'soup.n.01'}, {'name': 'soup_bowl', 'id': 987, 'frequency': 'r', 'synset': 'soup_bowl.n.01'}, {'name': 'soupspoon', 'id': 988, 'frequency': 'c', 'synset': 'soupspoon.n.01'}, {'name': 'sour_cream', 'id': 989, 'frequency': 'c', 'synset': 'sour_cream.n.01'}, {'name': 'soya_milk', 'id': 990, 'frequency': 'r', 'synset': 'soya_milk.n.01'}, {'name': 'space_shuttle', 'id': 991, 'frequency': 'r', 'synset': 'space_shuttle.n.01'}, {'name': 'sparkler_(fireworks)', 'id': 992, 'frequency': 'r', 'synset': 'sparkler.n.02'}, {'name': 'spatula', 'id': 993, 'frequency': 'f', 'synset': 'spatula.n.02'}, 
{'name': 'spear', 'id': 994, 'frequency': 'r', 'synset': 'spear.n.01'}, {'name': 'spectacles', 'id': 995, 'frequency': 'f', 'synset': 'spectacles.n.01'}, {'name': 'spice_rack', 'id': 996, 'frequency': 'c', 'synset': 'spice_rack.n.01'}, {'name': 'spider', 'id': 997, 'frequency': 'c', 'synset': 'spider.n.01'}, {'name': 'crawfish', 'id': 998, 'frequency': 'r', 'synset': 'spiny_lobster.n.02'}, {'name': 'sponge', 'id': 999, 'frequency': 'c', 'synset': 'sponge.n.01'}, {'name': 'spoon', 'id': 1000, 'frequency': 'f', 'synset': 'spoon.n.01'}, {'name': 'sportswear', 'id': 1001, 'frequency': 'c', 'synset': 'sportswear.n.01'}, {'name': 'spotlight', 'id': 1002, 'frequency': 'c', 'synset': 'spotlight.n.02'}, {'name': 'squid_(food)', 'id': 1003, 'frequency': 'r', 'synset': 'squid.n.01'}, {'name': 'squirrel', 'id': 1004, 'frequency': 'c', 'synset': 'squirrel.n.01'}, {'name': 'stagecoach', 'id': 1005, 'frequency': 'r', 'synset': 'stagecoach.n.01'}, {'name': 'stapler_(stapling_machine)', 'id': 1006, 'frequency': 'c', 'synset': 'stapler.n.01'}, {'name': 'starfish', 'id': 1007, 'frequency': 'c', 'synset': 'starfish.n.01'}, {'name': 'statue_(sculpture)', 'id': 1008, 'frequency': 'f', 'synset': 'statue.n.01'}, {'name': 'steak_(food)', 'id': 1009, 'frequency': 'c', 'synset': 'steak.n.01'}, {'name': 'steak_knife', 'id': 1010, 'frequency': 'r', 'synset': 'steak_knife.n.01'}, {'name': 'steering_wheel', 'id': 1011, 'frequency': 'f', 'synset': 'steering_wheel.n.01'}, {'name': 'stepladder', 'id': 1012, 'frequency': 'r', 'synset': 'step_ladder.n.01'}, {'name': 'step_stool', 'id': 1013, 'frequency': 'c', 'synset': 'step_stool.n.01'}, {'name': 'stereo_(sound_system)', 'id': 1014, 'frequency': 'c', 'synset': 'stereo.n.01'}, {'name': 'stew', 'id': 1015, 'frequency': 'r', 'synset': 'stew.n.02'}, {'name': 'stirrer', 'id': 1016, 'frequency': 'r', 'synset': 'stirrer.n.02'}, {'name': 'stirrup', 'id': 1017, 'frequency': 'f', 'synset': 'stirrup.n.01'}, {'name': 'stool', 'id': 1018, 'frequency': 'f', 'synset': 'stool.n.01'}, {'name': 'stop_sign', 'id': 1019, 'frequency': 'f', 'synset': 'stop_sign.n.01'}, {'name': 'brake_light', 'id': 1020, 'frequency': 'f', 'synset': 'stoplight.n.01'}, {'name': 'stove', 'id': 1021, 'frequency': 'f', 'synset': 'stove.n.01'}, {'name': 'strainer', 'id': 1022, 'frequency': 'c', 'synset': 'strainer.n.01'}, {'name': 'strap', 'id': 1023, 'frequency': 'f', 'synset': 'strap.n.01'}, {'name': 'straw_(for_drinking)', 'id': 1024, 'frequency': 'f', 'synset': 'straw.n.04'}, {'name': 'strawberry', 'id': 1025, 'frequency': 'f', 'synset': 'strawberry.n.01'}, {'name': 'street_sign', 'id': 1026, 'frequency': 'f', 'synset': 'street_sign.n.01'}, {'name': 'streetlight', 'id': 1027, 'frequency': 'f', 'synset': 'streetlight.n.01'}, {'name': 'string_cheese', 'id': 1028, 'frequency': 'r', 'synset': 'string_cheese.n.01'}, {'name': 'stylus', 'id': 1029, 'frequency': 'r', 'synset': 'stylus.n.02'}, {'name': 'subwoofer', 'id': 1030, 'frequency': 'r', 'synset': 'subwoofer.n.01'}, {'name': 'sugar_bowl', 'id': 1031, 'frequency': 'r', 'synset': 'sugar_bowl.n.01'}, {'name': 'sugarcane_(plant)', 'id': 1032, 'frequency': 'r', 'synset': 'sugarcane.n.01'}, {'name': 'suit_(clothing)', 'id': 1033, 'frequency': 'f', 'synset': 'suit.n.01'}, {'name': 'sunflower', 'id': 1034, 'frequency': 'c', 'synset': 'sunflower.n.01'}, {'name': 'sunglasses', 'id': 1035, 'frequency': 'f', 'synset': 'sunglasses.n.01'}, {'name': 'sunhat', 'id': 1036, 'frequency': 'c', 'synset': 'sunhat.n.01'}, {'name': 'surfboard', 'id': 1037, 'frequency': 'f', 'synset': 
'surfboard.n.01'}, {'name': 'sushi', 'id': 1038, 'frequency': 'c', 'synset': 'sushi.n.01'}, {'name': 'mop', 'id': 1039, 'frequency': 'c', 'synset': 'swab.n.02'}, {'name': 'sweat_pants', 'id': 1040, 'frequency': 'c', 'synset': 'sweat_pants.n.01'}, {'name': 'sweatband', 'id': 1041, 'frequency': 'c', 'synset': 'sweatband.n.02'}, {'name': 'sweater', 'id': 1042, 'frequency': 'f', 'synset': 'sweater.n.01'}, {'name': 'sweatshirt', 'id': 1043, 'frequency': 'f', 'synset': 'sweatshirt.n.01'}, {'name': 'sweet_potato', 'id': 1044, 'frequency': 'c', 'synset': 'sweet_potato.n.02'}, {'name': 'swimsuit', 'id': 1045, 'frequency': 'f', 'synset': 'swimsuit.n.01'}, {'name': 'sword', 'id': 1046, 'frequency': 'c', 'synset': 'sword.n.01'}, {'name': 'syringe', 'id': 1047, 'frequency': 'r', 'synset': 'syringe.n.01'}, {'name': 'Tabasco_sauce', 'id': 1048, 'frequency': 'r', 'synset': 'tabasco.n.02'}, {'name': 'table-tennis_table', 'id': 1049, 'frequency': 'r', 'synset': 'table-tennis_table.n.01'}, {'name': 'table', 'id': 1050, 'frequency': 'f', 'synset': 'table.n.02'}, {'name': 'table_lamp', 'id': 1051, 'frequency': 'c', 'synset': 'table_lamp.n.01'}, {'name': 'tablecloth', 'id': 1052, 'frequency': 'f', 'synset': 'tablecloth.n.01'}, {'name': 'tachometer', 'id': 1053, 'frequency': 'r', 'synset': 'tachometer.n.01'}, {'name': 'taco', 'id': 1054, 'frequency': 'r', 'synset': 'taco.n.02'}, {'name': 'tag', 'id': 1055, 'frequency': 'f', 'synset': 'tag.n.02'}, {'name': 'taillight', 'id': 1056, 'frequency': 'f', 'synset': 'taillight.n.01'}, {'name': 'tambourine', 'id': 1057, 'frequency': 'r', 'synset': 'tambourine.n.01'}, {'name': 'army_tank', 'id': 1058, 'frequency': 'r', 'synset': 'tank.n.01'}, {'name': 'tank_(storage_vessel)', 'id': 1059, 'frequency': 'f', 'synset': 'tank.n.02'}, {'name': 'tank_top_(clothing)', 'id': 1060, 'frequency': 'f', 'synset': 'tank_top.n.01'}, {'name': 'tape_(sticky_cloth_or_paper)', 'id': 1061, 'frequency': 'f', 'synset': 'tape.n.01'}, {'name': 'tape_measure', 'id': 1062, 'frequency': 'c', 'synset': 'tape.n.04'}, {'name': 'tapestry', 'id': 1063, 'frequency': 'c', 'synset': 'tapestry.n.02'}, {'name': 'tarp', 'id': 1064, 'frequency': 'f', 'synset': 'tarpaulin.n.01'}, {'name': 'tartan', 'id': 1065, 'frequency': 'c', 'synset': 'tartan.n.01'}, {'name': 'tassel', 'id': 1066, 'frequency': 'c', 'synset': 'tassel.n.01'}, {'name': 'tea_bag', 'id': 1067, 'frequency': 'c', 'synset': 'tea_bag.n.01'}, {'name': 'teacup', 'id': 1068, 'frequency': 'c', 'synset': 'teacup.n.02'}, {'name': 'teakettle', 'id': 1069, 'frequency': 'c', 'synset': 'teakettle.n.01'}, {'name': 'teapot', 'id': 1070, 'frequency': 'f', 'synset': 'teapot.n.01'}, {'name': 'teddy_bear', 'id': 1071, 'frequency': 'f', 'synset': 'teddy.n.01'}, {'name': 'telephone', 'id': 1072, 'frequency': 'f', 'synset': 'telephone.n.01'}, {'name': 'telephone_booth', 'id': 1073, 'frequency': 'c', 'synset': 'telephone_booth.n.01'}, {'name': 'telephone_pole', 'id': 1074, 'frequency': 'f', 'synset': 'telephone_pole.n.01'}, {'name': 'telephoto_lens', 'id': 1075, 'frequency': 'r', 'synset': 'telephoto_lens.n.01'}, {'name': 'television_camera', 'id': 1076, 'frequency': 'c', 'synset': 'television_camera.n.01'}, {'name': 'television_set', 'id': 1077, 'frequency': 'f', 'synset': 'television_receiver.n.01'}, {'name': 'tennis_ball', 'id': 1078, 'frequency': 'f', 'synset': 'tennis_ball.n.01'}, {'name': 'tennis_racket', 'id': 1079, 'frequency': 'f', 'synset': 'tennis_racket.n.01'}, {'name': 'tequila', 'id': 1080, 'frequency': 'r', 'synset': 'tequila.n.01'}, {'name': 'thermometer', 
'id': 1081, 'frequency': 'c', 'synset': 'thermometer.n.01'}, {'name': 'thermos_bottle', 'id': 1082, 'frequency': 'c', 'synset': 'thermos.n.01'}, {'name': 'thermostat', 'id': 1083, 'frequency': 'f', 'synset': 'thermostat.n.01'}, {'name': 'thimble', 'id': 1084, 'frequency': 'r', 'synset': 'thimble.n.02'}, {'name': 'thread', 'id': 1085, 'frequency': 'c', 'synset': 'thread.n.01'}, {'name': 'thumbtack', 'id': 1086, 'frequency': 'c', 'synset': 'thumbtack.n.01'}, {'name': 'tiara', 'id': 1087, 'frequency': 'c', 'synset': 'tiara.n.01'}, {'name': 'tiger', 'id': 1088, 'frequency': 'c', 'synset': 'tiger.n.02'}, {'name': 'tights_(clothing)', 'id': 1089, 'frequency': 'c', 'synset': 'tights.n.01'}, {'name': 'timer', 'id': 1090, 'frequency': 'c', 'synset': 'timer.n.01'}, {'name': 'tinfoil', 'id': 1091, 'frequency': 'f', 'synset': 'tinfoil.n.01'}, {'name': 'tinsel', 'id': 1092, 'frequency': 'c', 'synset': 'tinsel.n.01'}, {'name': 'tissue_paper', 'id': 1093, 'frequency': 'f', 'synset': 'tissue.n.02'}, {'name': 'toast_(food)', 'id': 1094, 'frequency': 'c', 'synset': 'toast.n.01'}, {'name': 'toaster', 'id': 1095, 'frequency': 'f', 'synset': 'toaster.n.02'}, {'name': 'toaster_oven', 'id': 1096, 'frequency': 'f', 'synset': 'toaster_oven.n.01'}, {'name': 'toilet', 'id': 1097, 'frequency': 'f', 'synset': 'toilet.n.02'}, {'name': 'toilet_tissue', 'id': 1098, 'frequency': 'f', 'synset': 'toilet_tissue.n.01'}, {'name': 'tomato', 'id': 1099, 'frequency': 'f', 'synset': 'tomato.n.01'}, {'name': 'tongs', 'id': 1100, 'frequency': 'f', 'synset': 'tongs.n.01'}, {'name': 'toolbox', 'id': 1101, 'frequency': 'c', 'synset': 'toolbox.n.01'}, {'name': 'toothbrush', 'id': 1102, 'frequency': 'f', 'synset': 'toothbrush.n.01'}, {'name': 'toothpaste', 'id': 1103, 'frequency': 'f', 'synset': 'toothpaste.n.01'}, {'name': 'toothpick', 'id': 1104, 'frequency': 'f', 'synset': 'toothpick.n.01'}, {'name': 'cover', 'id': 1105, 'frequency': 'f', 'synset': 'top.n.09'}, {'name': 'tortilla', 'id': 1106, 'frequency': 'c', 'synset': 'tortilla.n.01'}, {'name': 'tow_truck', 'id': 1107, 'frequency': 'c', 'synset': 'tow_truck.n.01'}, {'name': 'towel', 'id': 1108, 'frequency': 'f', 'synset': 'towel.n.01'}, {'name': 'towel_rack', 'id': 1109, 'frequency': 'f', 'synset': 'towel_rack.n.01'}, {'name': 'toy', 'id': 1110, 'frequency': 'f', 'synset': 'toy.n.03'}, {'name': 'tractor_(farm_equipment)', 'id': 1111, 'frequency': 'c', 'synset': 'tractor.n.01'}, {'name': 'traffic_light', 'id': 1112, 'frequency': 'f', 'synset': 'traffic_light.n.01'}, {'name': 'dirt_bike', 'id': 1113, 'frequency': 'c', 'synset': 'trail_bike.n.01'}, {'name': 'trailer_truck', 'id': 1114, 'frequency': 'f', 'synset': 'trailer_truck.n.01'}, {'name': 'train_(railroad_vehicle)', 'id': 1115, 'frequency': 'f', 'synset': 'train.n.01'}, {'name': 'trampoline', 'id': 1116, 'frequency': 'r', 'synset': 'trampoline.n.01'}, {'name': 'tray', 'id': 1117, 'frequency': 'f', 'synset': 'tray.n.01'}, {'name': 'trench_coat', 'id': 1118, 'frequency': 'r', 'synset': 'trench_coat.n.01'}, {'name': 'triangle_(musical_instrument)', 'id': 1119, 'frequency': 'r', 'synset': 'triangle.n.05'}, {'name': 'tricycle', 'id': 1120, 'frequency': 'c', 'synset': 'tricycle.n.01'}, {'name': 'tripod', 'id': 1121, 'frequency': 'f', 'synset': 'tripod.n.01'}, {'name': 'trousers', 'id': 1122, 'frequency': 'f', 'synset': 'trouser.n.01'}, {'name': 'truck', 'id': 1123, 'frequency': 'f', 'synset': 'truck.n.01'}, {'name': 'truffle_(chocolate)', 'id': 1124, 'frequency': 'r', 'synset': 'truffle.n.03'}, {'name': 'trunk', 'id': 1125, 
'frequency': 'c', 'synset': 'trunk.n.02'}, {'name': 'vat', 'id': 1126, 'frequency': 'r', 'synset': 'tub.n.02'}, {'name': 'turban', 'id': 1127, 'frequency': 'c', 'synset': 'turban.n.01'}, {'name': 'turkey_(food)', 'id': 1128, 'frequency': 'c', 'synset': 'turkey.n.04'}, {'name': 'turnip', 'id': 1129, 'frequency': 'r', 'synset': 'turnip.n.01'}, {'name': 'turtle', 'id': 1130, 'frequency': 'c', 'synset': 'turtle.n.02'}, {'name': 'turtleneck_(clothing)', 'id': 1131, 'frequency': 'c', 'synset': 'turtleneck.n.01'}, {'name': 'typewriter', 'id': 1132, 'frequency': 'c', 'synset': 'typewriter.n.01'}, {'name': 'umbrella', 'id': 1133, 'frequency': 'f', 'synset': 'umbrella.n.01'}, {'name': 'underwear', 'id': 1134, 'frequency': 'f', 'synset': 'underwear.n.01'}, {'name': 'unicycle', 'id': 1135, 'frequency': 'r', 'synset': 'unicycle.n.01'}, {'name': 'urinal', 'id': 1136, 'frequency': 'f', 'synset': 'urinal.n.01'}, {'name': 'urn', 'id': 1137, 'frequency': 'c', 'synset': 'urn.n.01'}, {'name': 'vacuum_cleaner', 'id': 1138, 'frequency': 'c', 'synset': 'vacuum.n.04'}, {'name': 'vase', 'id': 1139, 'frequency': 'f', 'synset': 'vase.n.01'}, {'name': 'vending_machine', 'id': 1140, 'frequency': 'c', 'synset': 'vending_machine.n.01'}, {'name': 'vent', 'id': 1141, 'frequency': 'f', 'synset': 'vent.n.01'}, {'name': 'vest', 'id': 1142, 'frequency': 'f', 'synset': 'vest.n.01'}, {'name': 'videotape', 'id': 1143, 'frequency': 'c', 'synset': 'videotape.n.01'}, {'name': 'vinegar', 'id': 1144, 'frequency': 'r', 'synset': 'vinegar.n.01'}, {'name': 'violin', 'id': 1145, 'frequency': 'r', 'synset': 'violin.n.01'}, {'name': 'vodka', 'id': 1146, 'frequency': 'r', 'synset': 'vodka.n.01'}, {'name': 'volleyball', 'id': 1147, 'frequency': 'c', 'synset': 'volleyball.n.02'}, {'name': 'vulture', 'id': 1148, 'frequency': 'r', 'synset': 'vulture.n.01'}, {'name': 'waffle', 'id': 1149, 'frequency': 'c', 'synset': 'waffle.n.01'}, {'name': 'waffle_iron', 'id': 1150, 'frequency': 'r', 'synset': 'waffle_iron.n.01'}, {'name': 'wagon', 'id': 1151, 'frequency': 'c', 'synset': 'wagon.n.01'}, {'name': 'wagon_wheel', 'id': 1152, 'frequency': 'c', 'synset': 'wagon_wheel.n.01'}, {'name': 'walking_stick', 'id': 1153, 'frequency': 'c', 'synset': 'walking_stick.n.01'}, {'name': 'wall_clock', 'id': 1154, 'frequency': 'c', 'synset': 'wall_clock.n.01'}, {'name': 'wall_socket', 'id': 1155, 'frequency': 'f', 'synset': 'wall_socket.n.01'}, {'name': 'wallet', 'id': 1156, 'frequency': 'f', 'synset': 'wallet.n.01'}, {'name': 'walrus', 'id': 1157, 'frequency': 'r', 'synset': 'walrus.n.01'}, {'name': 'wardrobe', 'id': 1158, 'frequency': 'r', 'synset': 'wardrobe.n.01'}, {'name': 'washbasin', 'id': 1159, 'frequency': 'r', 'synset': 'washbasin.n.01'}, {'name': 'automatic_washer', 'id': 1160, 'frequency': 'c', 'synset': 'washer.n.03'}, {'name': 'watch', 'id': 1161, 'frequency': 'f', 'synset': 'watch.n.01'}, {'name': 'water_bottle', 'id': 1162, 'frequency': 'f', 'synset': 'water_bottle.n.01'}, {'name': 'water_cooler', 'id': 1163, 'frequency': 'c', 'synset': 'water_cooler.n.01'}, {'name': 'water_faucet', 'id': 1164, 'frequency': 'c', 'synset': 'water_faucet.n.01'}, {'name': 'water_heater', 'id': 1165, 'frequency': 'r', 'synset': 'water_heater.n.01'}, {'name': 'water_jug', 'id': 1166, 'frequency': 'c', 'synset': 'water_jug.n.01'}, {'name': 'water_gun', 'id': 1167, 'frequency': 'r', 'synset': 'water_pistol.n.01'}, {'name': 'water_scooter', 'id': 1168, 'frequency': 'c', 'synset': 'water_scooter.n.01'}, {'name': 'water_ski', 'id': 1169, 'frequency': 'c', 'synset': 
'water_ski.n.01'}, {'name': 'water_tower', 'id': 1170, 'frequency': 'c', 'synset': 'water_tower.n.01'}, {'name': 'watering_can', 'id': 1171, 'frequency': 'c', 'synset': 'watering_can.n.01'}, {'name': 'watermelon', 'id': 1172, 'frequency': 'f', 'synset': 'watermelon.n.02'}, {'name': 'weathervane', 'id': 1173, 'frequency': 'f', 'synset': 'weathervane.n.01'}, {'name': 'webcam', 'id': 1174, 'frequency': 'c', 'synset': 'webcam.n.01'}, {'name': 'wedding_cake', 'id': 1175, 'frequency': 'c', 'synset': 'wedding_cake.n.01'}, {'name': 'wedding_ring', 'id': 1176, 'frequency': 'c', 'synset': 'wedding_ring.n.01'}, {'name': 'wet_suit', 'id': 1177, 'frequency': 'f', 'synset': 'wet_suit.n.01'}, {'name': 'wheel', 'id': 1178, 'frequency': 'f', 'synset': 'wheel.n.01'}, {'name': 'wheelchair', 'id': 1179, 'frequency': 'c', 'synset': 'wheelchair.n.01'}, {'name': 'whipped_cream', 'id': 1180, 'frequency': 'c', 'synset': 'whipped_cream.n.01'}, {'name': 'whistle', 'id': 1181, 'frequency': 'c', 'synset': 'whistle.n.03'}, {'name': 'wig', 'id': 1182, 'frequency': 'c', 'synset': 'wig.n.01'}, {'name': 'wind_chime', 'id': 1183, 'frequency': 'c', 'synset': 'wind_chime.n.01'}, {'name': 'windmill', 'id': 1184, 'frequency': 'c', 'synset': 'windmill.n.01'}, {'name': 'window_box_(for_plants)', 'id': 1185, 'frequency': 'c', 'synset': 'window_box.n.01'}, {'name': 'windshield_wiper', 'id': 1186, 'frequency': 'f', 'synset': 'windshield_wiper.n.01'}, {'name': 'windsock', 'id': 1187, 'frequency': 'c', 'synset': 'windsock.n.01'}, {'name': 'wine_bottle', 'id': 1188, 'frequency': 'f', 'synset': 'wine_bottle.n.01'}, {'name': 'wine_bucket', 'id': 1189, 'frequency': 'c', 'synset': 'wine_bucket.n.01'}, {'name': 'wineglass', 'id': 1190, 'frequency': 'f', 'synset': 'wineglass.n.01'}, {'name': 'blinder_(for_horses)', 'id': 1191, 'frequency': 'f', 'synset': 'winker.n.02'}, {'name': 'wok', 'id': 1192, 'frequency': 'c', 'synset': 'wok.n.01'}, {'name': 'wolf', 'id': 1193, 'frequency': 'r', 'synset': 'wolf.n.01'}, {'name': 'wooden_spoon', 'id': 1194, 'frequency': 'c', 'synset': 'wooden_spoon.n.02'}, {'name': 'wreath', 'id': 1195, 'frequency': 'c', 'synset': 'wreath.n.01'}, {'name': 'wrench', 'id': 1196, 'frequency': 'c', 'synset': 'wrench.n.03'}, {'name': 'wristband', 'id': 1197, 'frequency': 'f', 'synset': 'wristband.n.01'}, {'name': 'wristlet', 'id': 1198, 'frequency': 'f', 'synset': 'wristlet.n.01'}, {'name': 'yacht', 'id': 1199, 'frequency': 'c', 'synset': 'yacht.n.01'}, {'name': 'yogurt', 'id': 1200, 'frequency': 'c', 'synset': 'yogurt.n.01'}, {'name': 'yoke_(animal_equipment)', 'id': 1201, 'frequency': 'c', 'synset': 'yoke.n.07'}, {'name': 'zebra', 'id': 1202, 'frequency': 'f', 'synset': 'zebra.n.01'}, {'name': 'zucchini', 'id': 1203, 'frequency': 'c', 'synset': 'zucchini.n.02'}, {'id': 1204, 'synset': 'organism.n.01', 'name': 'organism'}, {'id': 1205, 'synset': 'benthos.n.02', 'name': 'benthos'}, {'id': 1206, 'synset': 'heterotroph.n.01', 'name': 'heterotroph'}, {'id': 1207, 'synset': 'cell.n.02', 'name': 'cell'}, {'id': 1208, 'synset': 'animal.n.01', 'name': 'animal'}, {'id': 1209, 'synset': 'plant.n.02', 'name': 'plant'}, {'id': 1210, 'synset': 'food.n.01', 'name': 'food'}, {'id': 1211, 'synset': 'artifact.n.01', 'name': 'artifact'}, {'id': 1212, 'synset': 'hop.n.01', 'name': 'hop'}, {'id': 1213, 'synset': 'check-in.n.01', 'name': 'check-in'}, {'id': 1214, 'synset': 'dressage.n.01', 'name': 'dressage'}, {'id': 1215, 'synset': 'curvet.n.01', 'name': 'curvet'}, {'id': 1216, 'synset': 'piaffe.n.01', 'name': 'piaffe'}, {'id': 1217, 'synset': 
'funambulism.n.01', 'name': 'funambulism'}, {'id': 1218, 'synset': 'rock_climbing.n.01', 'name': 'rock_climbing'}, {'id': 1219, 'synset': 'contact_sport.n.01', 'name': 'contact_sport'}, {'id': 1220, 'synset': 'outdoor_sport.n.01', 'name': 'outdoor_sport'}, {'id': 1221, 'synset': 'gymnastics.n.01', 'name': 'gymnastics'}, {'id': 1222, 'synset': 'acrobatics.n.01', 'name': 'acrobatics'}, {'id': 1223, 'synset': 'track_and_field.n.01', 'name': 'track_and_field'}, {'id': 1224, 'synset': 'track.n.11', 'name': 'track'}, {'id': 1225, 'synset': 'jumping.n.01', 'name': 'jumping'}, {'id': 1226, 'synset': 'broad_jump.n.02', 'name': 'broad_jump'}, {'id': 1227, 'synset': 'high_jump.n.02', 'name': 'high_jump'}, {'id': 1228, 'synset': 'fosbury_flop.n.01', 'name': 'Fosbury_flop'}, {'id': 1229, 'synset': 'skiing.n.01', 'name': 'skiing'}, {'id': 1230, 'synset': 'cross-country_skiing.n.01', 'name': 'cross-country_skiing'}, {'id': 1231, 'synset': 'ski_jumping.n.01', 'name': 'ski_jumping'}, {'id': 1232, 'synset': 'water_sport.n.01', 'name': 'water_sport'}, {'id': 1233, 'synset': 'swimming.n.01', 'name': 'swimming'}, {'id': 1234, 'synset': 'bathe.n.01', 'name': 'bathe'}, {'id': 1235, 'synset': 'dip.n.08', 'name': 'dip'}, {'id': 1236, 'synset': 'dive.n.02', 'name': 'dive'}, {'id': 1237, 'synset': 'floating.n.01', 'name': 'floating'}, {'id': 1238, 'synset': "dead-man's_float.n.01", 'name': "dead-man's_float"}, {'id': 1239, 'synset': 'belly_flop.n.01', 'name': 'belly_flop'}, {'id': 1240, 'synset': 'cliff_diving.n.01', 'name': 'cliff_diving'}, {'id': 1241, 'synset': 'flip.n.05', 'name': 'flip'}, {'id': 1242, 'synset': 'gainer.n.03', 'name': 'gainer'}, {'id': 1243, 'synset': 'half_gainer.n.01', 'name': 'half_gainer'}, {'id': 1244, 'synset': 'jackknife.n.02', 'name': 'jackknife'}, {'id': 1245, 'synset': 'swan_dive.n.01', 'name': 'swan_dive'}, {'id': 1246, 'synset': 'skin_diving.n.01', 'name': 'skin_diving'}, {'id': 1247, 'synset': 'scuba_diving.n.01', 'name': 'scuba_diving'}, {'id': 1248, 'synset': 'snorkeling.n.01', 'name': 'snorkeling'}, {'id': 1249, 'synset': 'surfing.n.01', 'name': 'surfing'}, {'id': 1250, 'synset': 'water-skiing.n.01', 'name': 'water-skiing'}, {'id': 1251, 'synset': 'rowing.n.01', 'name': 'rowing'}, {'id': 1252, 'synset': 'sculling.n.01', 'name': 'sculling'}, {'id': 1253, 'synset': 'boxing.n.01', 'name': 'boxing'}, {'id': 1254, 'synset': 'professional_boxing.n.01', 'name': 'professional_boxing'}, {'id': 1255, 'synset': 'in-fighting.n.02', 'name': 'in-fighting'}, {'id': 1256, 'synset': 'fight.n.05', 'name': 'fight'}, {'id': 1257, 'synset': 'rope-a-dope.n.01', 'name': 'rope-a-dope'}, {'id': 1258, 'synset': 'spar.n.03', 'name': 'spar'}, {'id': 1259, 'synset': 'archery.n.01', 'name': 'archery'}, {'id': 1260, 'synset': 'sledding.n.01', 'name': 'sledding'}, {'id': 1261, 'synset': 'tobogganing.n.01', 'name': 'tobogganing'}, {'id': 1262, 'synset': 'luging.n.01', 'name': 'luging'}, {'id': 1263, 'synset': 'bobsledding.n.01', 'name': 'bobsledding'}, {'id': 1264, 'synset': 'wrestling.n.02', 'name': 'wrestling'}, {'id': 1265, 'synset': 'greco-roman_wrestling.n.01', 'name': 'Greco-Roman_wrestling'}, {'id': 1266, 'synset': 'professional_wrestling.n.01', 'name': 'professional_wrestling'}, {'id': 1267, 'synset': 'sumo.n.01', 'name': 'sumo'}, {'id': 1268, 'synset': 'skating.n.01', 'name': 'skating'}, {'id': 1269, 'synset': 'ice_skating.n.01', 'name': 'ice_skating'}, {'id': 1270, 'synset': 'figure_skating.n.01', 'name': 'figure_skating'}, {'id': 1271, 'synset': 'rollerblading.n.01', 'name': 'rollerblading'}, {'id': 
1272, 'synset': 'roller_skating.n.01', 'name': 'roller_skating'}, {'id': 1273, 'synset': 'skateboarding.n.01', 'name': 'skateboarding'}, {'id': 1274, 'synset': 'speed_skating.n.01', 'name': 'speed_skating'}, {'id': 1275, 'synset': 'racing.n.01', 'name': 'racing'}, {'id': 1276, 'synset': 'auto_racing.n.01', 'name': 'auto_racing'}, {'id': 1277, 'synset': 'boat_racing.n.01', 'name': 'boat_racing'}, {'id': 1278, 'synset': 'hydroplane_racing.n.01', 'name': 'hydroplane_racing'}, {'id': 1279, 'synset': 'camel_racing.n.01', 'name': 'camel_racing'}, {'id': 1280, 'synset': 'greyhound_racing.n.01', 'name': 'greyhound_racing'}, {'id': 1281, 'synset': 'horse_racing.n.01', 'name': 'horse_racing'}, {'id': 1282, 'synset': 'riding.n.01', 'name': 'riding'}, {'id': 1283, 'synset': 'equestrian_sport.n.01', 'name': 'equestrian_sport'}, {'id': 1284, 'synset': 'pony-trekking.n.01', 'name': 'pony-trekking'}, {'id': 1285, 'synset': 'showjumping.n.01', 'name': 'showjumping'}, {'id': 1286, 'synset': 'cross-country_riding.n.01', 'name': 'cross-country_riding'}, {'id': 1287, 'synset': 'cycling.n.01', 'name': 'cycling'}, {'id': 1288, 'synset': 'bicycling.n.01', 'name': 'bicycling'}, {'id': 1289, 'synset': 'motorcycling.n.01', 'name': 'motorcycling'}, {'id': 1290, 'synset': 'dune_cycling.n.01', 'name': 'dune_cycling'}, {'id': 1291, 'synset': 'blood_sport.n.01', 'name': 'blood_sport'}, {'id': 1292, 'synset': 'bullfighting.n.01', 'name': 'bullfighting'}, {'id': 1293, 'synset': 'cockfighting.n.01', 'name': 'cockfighting'}, {'id': 1294, 'synset': 'hunt.n.08', 'name': 'hunt'}, {'id': 1295, 'synset': 'battue.n.01', 'name': 'battue'}, {'id': 1296, 'synset': 'beagling.n.01', 'name': 'beagling'}, {'id': 1297, 'synset': 'coursing.n.01', 'name': 'coursing'}, {'id': 1298, 'synset': 'deer_hunting.n.01', 'name': 'deer_hunting'}, {'id': 1299, 'synset': 'ducking.n.01', 'name': 'ducking'}, {'id': 1300, 'synset': 'fox_hunting.n.01', 'name': 'fox_hunting'}, {'id': 1301, 'synset': 'pigsticking.n.01', 'name': 'pigsticking'}, {'id': 1302, 'synset': 'fishing.n.01', 'name': 'fishing'}, {'id': 1303, 'synset': 'angling.n.01', 'name': 'angling'}, {'id': 1304, 'synset': 'fly-fishing.n.01', 'name': 'fly-fishing'}, {'id': 1305, 'synset': 'troll.n.04', 'name': 'troll'}, {'id': 1306, 'synset': 'casting.n.03', 'name': 'casting'}, {'id': 1307, 'synset': 'bait_casting.n.01', 'name': 'bait_casting'}, {'id': 1308, 'synset': 'fly_casting.n.01', 'name': 'fly_casting'}, {'id': 1309, 'synset': 'overcast.n.04', 'name': 'overcast'}, {'id': 1310, 'synset': 'surf_casting.n.01', 'name': 'surf_casting'}, {'id': 1311, 'synset': 'day_game.n.01', 'name': 'day_game'}, {'id': 1312, 'synset': 'athletic_game.n.01', 'name': 'athletic_game'}, {'id': 1313, 'synset': 'ice_hockey.n.01', 'name': 'ice_hockey'}, {'id': 1314, 'synset': 'tetherball.n.01', 'name': 'tetherball'}, {'id': 1315, 'synset': 'water_polo.n.01', 'name': 'water_polo'}, {'id': 1316, 'synset': 'outdoor_game.n.01', 'name': 'outdoor_game'}, {'id': 1317, 'synset': 'golf.n.01', 'name': 'golf'}, {'id': 1318, 'synset': 'professional_golf.n.01', 'name': 'professional_golf'}, {'id': 1319, 'synset': 'round_of_golf.n.01', 'name': 'round_of_golf'}, {'id': 1320, 'synset': 'medal_play.n.01', 'name': 'medal_play'}, {'id': 1321, 'synset': 'match_play.n.01', 'name': 'match_play'}, {'id': 1322, 'synset': 'miniature_golf.n.01', 'name': 'miniature_golf'}, {'id': 1323, 'synset': 'croquet.n.01', 'name': 'croquet'}, {'id': 1324, 'synset': 'quoits.n.01', 'name': 'quoits'}, {'id': 1325, 'synset': 'shuffleboard.n.01', 'name': 
'shuffleboard'}, {'id': 1326, 'synset': 'field_game.n.01', 'name': 'field_game'}, {'id': 1327, 'synset': 'field_hockey.n.01', 'name': 'field_hockey'}, {'id': 1328, 'synset': 'shinny.n.01', 'name': 'shinny'}, {'id': 1329, 'synset': 'football.n.01', 'name': 'football'}, {'id': 1330, 'synset': 'american_football.n.01', 'name': 'American_football'}, {'id': 1331, 'synset': 'professional_football.n.01', 'name': 'professional_football'}, {'id': 1332, 'synset': 'touch_football.n.01', 'name': 'touch_football'}, {'id': 1333, 'synset': 'hurling.n.01', 'name': 'hurling'}, {'id': 1334, 'synset': 'rugby.n.01', 'name': 'rugby'}, {'id': 1335, 'synset': 'ball_game.n.01', 'name': 'ball_game'}, {'id': 1336, 'synset': 'baseball.n.01', 'name': 'baseball'}, {'id': 1337, 'synset': 'ball.n.11', 'name': 'ball'}, {'id': 1338, 'synset': 'professional_baseball.n.01', 'name': 'professional_baseball'}, {'id': 1339, 'synset': 'hardball.n.02', 'name': 'hardball'}, {'id': 1340, 'synset': 'perfect_game.n.01', 'name': 'perfect_game'}, {'id': 1341, 'synset': 'no-hit_game.n.01', 'name': 'no-hit_game'}, {'id': 1342, 'synset': 'one-hitter.n.01', 'name': 'one-hitter'}, {'id': 1343, 'synset': 'two-hitter.n.01', 'name': 'two-hitter'}, {'id': 1344, 'synset': 'three-hitter.n.01', 'name': 'three-hitter'}, {'id': 1345, 'synset': 'four-hitter.n.01', 'name': 'four-hitter'}, {'id': 1346, 'synset': 'five-hitter.n.01', 'name': 'five-hitter'}, {'id': 1347, 'synset': 'softball.n.02', 'name': 'softball'}, {'id': 1348, 'synset': 'rounders.n.01', 'name': 'rounders'}, {'id': 1349, 'synset': 'stickball.n.01', 'name': 'stickball'}, {'id': 1350, 'synset': 'cricket.n.02', 'name': 'cricket'}, {'id': 1351, 'synset': 'lacrosse.n.01', 'name': 'lacrosse'}, {'id': 1352, 'synset': 'polo.n.02', 'name': 'polo'}, {'id': 1353, 'synset': 'pushball.n.01', 'name': 'pushball'}, {'id': 1354, 'synset': 'soccer.n.01', 'name': 'soccer'}, {'id': 1355, 'synset': 'court_game.n.01', 'name': 'court_game'}, {'id': 1356, 'synset': 'handball.n.02', 'name': 'handball'}, {'id': 1357, 'synset': 'racquetball.n.02', 'name': 'racquetball'}, {'id': 1358, 'synset': 'fives.n.01', 'name': 'fives'}, {'id': 1359, 'synset': 'squash.n.03', 'name': 'squash'}, {'id': 1360, 'synset': 'volleyball.n.01', 'name': 'volleyball'}, {'id': 1361, 'synset': 'jai_alai.n.01', 'name': 'jai_alai'}, {'id': 1362, 'synset': 'badminton.n.01', 'name': 'badminton'}, {'id': 1363, 'synset': 'battledore.n.02', 'name': 'battledore'}, {'id': 1364, 'synset': 'basketball.n.01', 'name': 'basketball'}, {'id': 1365, 'synset': 'professional_basketball.n.01', 'name': 'professional_basketball'}, {'id': 1366, 'synset': 'deck_tennis.n.01', 'name': 'deck_tennis'}, {'id': 1367, 'synset': 'netball.n.01', 'name': 'netball'}, {'id': 1368, 'synset': 'tennis.n.01', 'name': 'tennis'}, {'id': 1369, 'synset': 'professional_tennis.n.01', 'name': 'professional_tennis'}, {'id': 1370, 'synset': 'singles.n.02', 'name': 'singles'}, {'id': 1371, 'synset': 'singles.n.01', 'name': 'singles'}, {'id': 1372, 'synset': 'doubles.n.02', 'name': 'doubles'}, {'id': 1373, 'synset': 'doubles.n.01', 'name': 'doubles'}, {'id': 1374, 'synset': 'royal_tennis.n.01', 'name': 'royal_tennis'}, {'id': 1375, 'synset': 'pallone.n.01', 'name': 'pallone'}, {'id': 1376, 'synset': 'sport.n.01', 'name': 'sport'}, {'id': 1377, 'synset': 'clasp.n.02', 'name': 'clasp'}, {'id': 1378, 'synset': 'judo.n.01', 'name': 'judo'}, {'id': 1379, 'synset': 'team_sport.n.01', 'name': 'team_sport'}, {'id': 1380, 'synset': 'last_supper.n.01', 'name': 'Last_Supper'}, {'id': 1381, 'synset': 
'seder.n.01', 'name': 'Seder'}, {'id': 1382, 'synset': 'camping.n.01', 'name': 'camping'}, {'id': 1383, 'synset': 'pest.n.04', 'name': 'pest'}, {'id': 1384, 'synset': 'critter.n.01', 'name': 'critter'}, {'id': 1385, 'synset': 'creepy-crawly.n.01', 'name': 'creepy-crawly'}, {'id': 1386, 'synset': 'darter.n.02', 'name': 'darter'}, {'id': 1387, 'synset': 'peeper.n.03', 'name': 'peeper'}, {'id': 1388, 'synset': 'homeotherm.n.01', 'name': 'homeotherm'}, {'id': 1389, 'synset': 'poikilotherm.n.01', 'name': 'poikilotherm'}, {'id': 1390, 'synset': 'range_animal.n.01', 'name': 'range_animal'}, {'id': 1391, 'synset': 'scavenger.n.03', 'name': 'scavenger'}, {'id': 1392, 'synset': 'bottom-feeder.n.02', 'name': 'bottom-feeder'}, {'id': 1393, 'synset': 'bottom-feeder.n.01', 'name': 'bottom-feeder'}, {'id': 1394, 'synset': 'work_animal.n.01', 'name': 'work_animal'}, {'id': 1395, 'synset': 'beast_of_burden.n.01', 'name': 'beast_of_burden'}, {'id': 1396, 'synset': 'draft_animal.n.01', 'name': 'draft_animal'}, {'id': 1397, 'synset': 'pack_animal.n.01', 'name': 'pack_animal'}, {'id': 1398, 'synset': 'domestic_animal.n.01', 'name': 'domestic_animal'}, {'id': 1399, 'synset': 'feeder.n.01', 'name': 'feeder'}, {'id': 1400, 'synset': 'feeder.n.06', 'name': 'feeder'}, {'id': 1401, 'synset': 'stocker.n.01', 'name': 'stocker'}, {'id': 1402, 'synset': 'hatchling.n.01', 'name': 'hatchling'}, {'id': 1403, 'synset': 'head.n.02', 'name': 'head'}, {'id': 1404, 'synset': 'migrator.n.02', 'name': 'migrator'}, {'id': 1405, 'synset': 'molter.n.01', 'name': 'molter'}, {'id': 1406, 'synset': 'stayer.n.01', 'name': 'stayer'}, {'id': 1407, 'synset': 'stunt.n.02', 'name': 'stunt'}, {'id': 1408, 'synset': 'marine_animal.n.01', 'name': 'marine_animal'}, {'id': 1409, 'synset': 'by-catch.n.01', 'name': 'by-catch'}, {'id': 1410, 'synset': 'female.n.01', 'name': 'female'}, {'id': 1411, 'synset': 'hen.n.04', 'name': 'hen'}, {'id': 1412, 'synset': 'male.n.01', 'name': 'male'}, {'id': 1413, 'synset': 'adult.n.02', 'name': 'adult'}, {'id': 1414, 'synset': 'young.n.01', 'name': 'young'}, {'id': 1415, 'synset': 'orphan.n.04', 'name': 'orphan'}, {'id': 1416, 'synset': 'young_mammal.n.01', 'name': 'young_mammal'}, {'id': 1417, 'synset': 'baby.n.06', 'name': 'baby'}, {'id': 1418, 'synset': 'pup.n.01', 'name': 'pup'}, {'id': 1419, 'synset': 'wolf_pup.n.01', 'name': 'wolf_pup'}, {'id': 1420, 'synset': 'lion_cub.n.01', 'name': 'lion_cub'}, {'id': 1421, 'synset': 'bear_cub.n.01', 'name': 'bear_cub'}, {'id': 1422, 'synset': 'tiger_cub.n.01', 'name': 'tiger_cub'}, {'id': 1423, 'synset': 'kit.n.03', 'name': 'kit'}, {'id': 1424, 'synset': 'suckling.n.03', 'name': 'suckling'}, {'id': 1425, 'synset': 'sire.n.03', 'name': 'sire'}, {'id': 1426, 'synset': 'dam.n.03', 'name': 'dam'}, {'id': 1427, 'synset': 'thoroughbred.n.03', 'name': 'thoroughbred'}, {'id': 1428, 'synset': 'giant.n.01', 'name': 'giant'}, {'id': 1429, 'synset': 'mutant.n.02', 'name': 'mutant'}, {'id': 1430, 'synset': 'carnivore.n.02', 'name': 'carnivore'}, {'id': 1431, 'synset': 'herbivore.n.01', 'name': 'herbivore'}, {'id': 1432, 'synset': 'insectivore.n.02', 'name': 'insectivore'}, {'id': 1433, 'synset': 'acrodont.n.01', 'name': 'acrodont'}, {'id': 1434, 'synset': 'pleurodont.n.01', 'name': 'pleurodont'}, {'id': 1435, 'synset': 'microorganism.n.01', 'name': 'microorganism'}, {'id': 1436, 'synset': 'monohybrid.n.01', 'name': 'monohybrid'}, {'id': 1437, 'synset': 'arbovirus.n.01', 'name': 'arbovirus'}, {'id': 1438, 'synset': 'adenovirus.n.01', 'name': 'adenovirus'}, {'id': 1439, 'synset': 
'arenavirus.n.01', 'name': 'arenavirus'}, {'id': 1440, 'synset': 'marburg_virus.n.01', 'name': 'Marburg_virus'}, {'id': 1441, 'synset': 'arenaviridae.n.01', 'name': 'Arenaviridae'}, {'id': 1442, 'synset': 'vesiculovirus.n.01', 'name': 'vesiculovirus'}, {'id': 1443, 'synset': 'reoviridae.n.01', 'name': 'Reoviridae'}, {'id': 1444, 'synset': 'variola_major.n.02', 'name': 'variola_major'}, {'id': 1445, 'synset': 'viroid.n.01', 'name': 'viroid'}, {'id': 1446, 'synset': 'coliphage.n.01', 'name': 'coliphage'}, {'id': 1447, 'synset': 'paramyxovirus.n.01', 'name': 'paramyxovirus'}, {'id': 1448, 'synset': 'poliovirus.n.01', 'name': 'poliovirus'}, {'id': 1449, 'synset': 'herpes.n.02', 'name': 'herpes'}, {'id': 1450, 'synset': 'herpes_simplex_1.n.01', 'name': 'herpes_simplex_1'}, {'id': 1451, 'synset': 'herpes_zoster.n.02', 'name': 'herpes_zoster'}, {'id': 1452, 'synset': 'herpes_varicella_zoster.n.01', 'name': 'herpes_varicella_zoster'}, {'id': 1453, 'synset': 'cytomegalovirus.n.01', 'name': 'cytomegalovirus'}, {'id': 1454, 'synset': 'varicella_zoster_virus.n.01', 'name': 'varicella_zoster_virus'}, {'id': 1455, 'synset': 'polyoma.n.01', 'name': 'polyoma'}, {'id': 1456, 'synset': 'lyssavirus.n.01', 'name': 'lyssavirus'}, {'id': 1457, 'synset': 'reovirus.n.01', 'name': 'reovirus'}, {'id': 1458, 'synset': 'rotavirus.n.01', 'name': 'rotavirus'}, {'id': 1459, 'synset': 'moneran.n.01', 'name': 'moneran'}, {'id': 1460, 'synset': 'archaebacteria.n.01', 'name': 'archaebacteria'}, {'id': 1461, 'synset': 'bacteroid.n.01', 'name': 'bacteroid'}, {'id': 1462, 'synset': 'bacillus_anthracis.n.01', 'name': 'Bacillus_anthracis'}, {'id': 1463, 'synset': 'yersinia_pestis.n.01', 'name': 'Yersinia_pestis'}, {'id': 1464, 'synset': 'brucella.n.01', 'name': 'Brucella'}, {'id': 1465, 'synset': 'spirillum.n.02', 'name': 'spirillum'}, {'id': 1466, 'synset': 'botulinus.n.01', 'name': 'botulinus'}, {'id': 1467, 'synset': 'clostridium_perfringens.n.01', 'name': 'clostridium_perfringens'}, {'id': 1468, 'synset': 'cyanobacteria.n.01', 'name': 'cyanobacteria'}, {'id': 1469, 'synset': 'trichodesmium.n.01', 'name': 'trichodesmium'}, {'id': 1470, 'synset': 'nitric_bacteria.n.01', 'name': 'nitric_bacteria'}, {'id': 1471, 'synset': 'spirillum.n.01', 'name': 'spirillum'}, {'id': 1472, 'synset': 'francisella.n.01', 'name': 'Francisella'}, {'id': 1473, 'synset': 'gonococcus.n.01', 'name': 'gonococcus'}, {'id': 1474, 'synset': 'corynebacterium_diphtheriae.n.01', 'name': 'Corynebacterium_diphtheriae'}, {'id': 1475, 'synset': 'enteric_bacteria.n.01', 'name': 'enteric_bacteria'}, {'id': 1476, 'synset': 'klebsiella.n.01', 'name': 'klebsiella'}, {'id': 1477, 'synset': 'salmonella_typhimurium.n.01', 'name': 'Salmonella_typhimurium'}, {'id': 1478, 'synset': 'typhoid_bacillus.n.01', 'name': 'typhoid_bacillus'}, {'id': 1479, 'synset': 'nitrate_bacterium.n.01', 'name': 'nitrate_bacterium'}, {'id': 1480, 'synset': 'nitrite_bacterium.n.01', 'name': 'nitrite_bacterium'}, {'id': 1481, 'synset': 'actinomycete.n.01', 'name': 'actinomycete'}, {'id': 1482, 'synset': 'streptomyces.n.01', 'name': 'streptomyces'}, {'id': 1483, 'synset': 'streptomyces_erythreus.n.01', 'name': 'Streptomyces_erythreus'}, {'id': 1484, 'synset': 'streptomyces_griseus.n.01', 'name': 'Streptomyces_griseus'}, {'id': 1485, 'synset': 'tubercle_bacillus.n.01', 'name': 'tubercle_bacillus'}, {'id': 1486, 'synset': 'pus-forming_bacteria.n.01', 'name': 'pus-forming_bacteria'}, {'id': 1487, 'synset': 'streptobacillus.n.01', 'name': 'streptobacillus'}, {'id': 1488, 'synset': 'myxobacteria.n.01', 
'name': 'myxobacteria'}, {'id': 1489, 'synset': 'staphylococcus.n.01', 'name': 'staphylococcus'}, {'id': 1490, 'synset': 'diplococcus.n.01', 'name': 'diplococcus'}, {'id': 1491, 'synset': 'pneumococcus.n.01', 'name': 'pneumococcus'}, {'id': 1492, 'synset': 'streptococcus.n.01', 'name': 'streptococcus'}, {'id': 1493, 'synset': 'spirochete.n.01', 'name': 'spirochete'}, {'id': 1494, 'synset': 'planktonic_algae.n.01', 'name': 'planktonic_algae'}, {'id': 1495, 'synset': 'zooplankton.n.01', 'name': 'zooplankton'}, {'id': 1496, 'synset': 'parasite.n.01', 'name': 'parasite'}, {'id': 1497, 'synset': 'endoparasite.n.01', 'name': 'endoparasite'}, {'id': 1498, 'synset': 'ectoparasite.n.01', 'name': 'ectoparasite'}, {'id': 1499, 'synset': 'pathogen.n.01', 'name': 'pathogen'}, {'id': 1500, 'synset': 'commensal.n.01', 'name': 'commensal'}, {'id': 1501, 'synset': 'myrmecophile.n.01', 'name': 'myrmecophile'}, {'id': 1502, 'synset': 'protoctist.n.01', 'name': 'protoctist'}, {'id': 1503, 'synset': 'protozoan.n.01', 'name': 'protozoan'}, {'id': 1504, 'synset': 'sarcodinian.n.01', 'name': 'sarcodinian'}, {'id': 1505, 'synset': 'heliozoan.n.01', 'name': 'heliozoan'}, {'id': 1506, 'synset': 'endameba.n.01', 'name': 'endameba'}, {'id': 1507, 'synset': 'ameba.n.01', 'name': 'ameba'}, {'id': 1508, 'synset': 'globigerina.n.01', 'name': 'globigerina'}, {'id': 1509, 'synset': 'testacean.n.01', 'name': 'testacean'}, {'id': 1510, 'synset': 'arcella.n.01', 'name': 'arcella'}, {'id': 1511, 'synset': 'difflugia.n.01', 'name': 'difflugia'}, {'id': 1512, 'synset': 'ciliate.n.01', 'name': 'ciliate'}, {'id': 1513, 'synset': 'paramecium.n.01', 'name': 'paramecium'}, {'id': 1514, 'synset': 'stentor.n.03', 'name': 'stentor'}, {'id': 1515, 'synset': 'alga.n.01', 'name': 'alga'}, {'id': 1516, 'synset': 'arame.n.01', 'name': 'arame'}, {'id': 1517, 'synset': 'seagrass.n.01', 'name': 'seagrass'}, {'id': 1518, 'synset': 'golden_algae.n.01', 'name': 'golden_algae'}, {'id': 1519, 'synset': 'yellow-green_algae.n.01', 'name': 'yellow-green_algae'}, {'id': 1520, 'synset': 'brown_algae.n.01', 'name': 'brown_algae'}, {'id': 1521, 'synset': 'kelp.n.01', 'name': 'kelp'}, {'id': 1522, 'synset': 'fucoid.n.02', 'name': 'fucoid'}, {'id': 1523, 'synset': 'fucoid.n.01', 'name': 'fucoid'}, {'id': 1524, 'synset': 'fucus.n.01', 'name': 'fucus'}, {'id': 1525, 'synset': 'bladderwrack.n.01', 'name': 'bladderwrack'}, {'id': 1526, 'synset': 'green_algae.n.01', 'name': 'green_algae'}, {'id': 1527, 'synset': 'pond_scum.n.01', 'name': 'pond_scum'}, {'id': 1528, 'synset': 'chlorella.n.01', 'name': 'chlorella'}, {'id': 1529, 'synset': 'stonewort.n.01', 'name': 'stonewort'}, {'id': 1530, 'synset': 'desmid.n.01', 'name': 'desmid'}, {'id': 1531, 'synset': 'sea_moss.n.02', 'name': 'sea_moss'}, {'id': 1532, 'synset': 'eukaryote.n.01', 'name': 'eukaryote'}, {'id': 1533, 'synset': 'prokaryote.n.01', 'name': 'prokaryote'}, {'id': 1534, 'synset': 'zooid.n.01', 'name': 'zooid'}, {'id': 1535, 'synset': 'leishmania.n.01', 'name': 'Leishmania'}, {'id': 1536, 'synset': 'zoomastigote.n.01', 'name': 'zoomastigote'}, {'id': 1537, 'synset': 'polymastigote.n.01', 'name': 'polymastigote'}, {'id': 1538, 'synset': 'costia.n.01', 'name': 'costia'}, {'id': 1539, 'synset': 'giardia.n.01', 'name': 'giardia'}, {'id': 1540, 'synset': 'cryptomonad.n.01', 'name': 'cryptomonad'}, {'id': 1541, 'synset': 'sporozoan.n.01', 'name': 'sporozoan'}, {'id': 1542, 'synset': 'sporozoite.n.01', 'name': 'sporozoite'}, {'id': 1543, 'synset': 'trophozoite.n.01', 'name': 'trophozoite'}, {'id': 1544, 'synset': 
'merozoite.n.01', 'name': 'merozoite'}, {'id': 1545, 'synset': 'coccidium.n.01', 'name': 'coccidium'}, {'id': 1546, 'synset': 'gregarine.n.01', 'name': 'gregarine'}, {'id': 1547, 'synset': 'plasmodium.n.02', 'name': 'plasmodium'}, {'id': 1548, 'synset': 'leucocytozoan.n.01', 'name': 'leucocytozoan'}, {'id': 1549, 'synset': 'microsporidian.n.01', 'name': 'microsporidian'}, {'id': 1550, 'synset': 'ostariophysi.n.01', 'name': 'Ostariophysi'}, {'id': 1551, 'synset': 'cypriniform_fish.n.01', 'name': 'cypriniform_fish'}, {'id': 1552, 'synset': 'loach.n.01', 'name': 'loach'}, {'id': 1553, 'synset': 'cyprinid.n.01', 'name': 'cyprinid'}, {'id': 1554, 'synset': 'carp.n.02', 'name': 'carp'}, {'id': 1555, 'synset': 'domestic_carp.n.01', 'name': 'domestic_carp'}, {'id': 1556, 'synset': 'leather_carp.n.01', 'name': 'leather_carp'}, {'id': 1557, 'synset': 'mirror_carp.n.01', 'name': 'mirror_carp'}, {'id': 1558, 'synset': 'european_bream.n.01', 'name': 'European_bream'}, {'id': 1559, 'synset': 'tench.n.01', 'name': 'tench'}, {'id': 1560, 'synset': 'dace.n.01', 'name': 'dace'}, {'id': 1561, 'synset': 'chub.n.01', 'name': 'chub'}, {'id': 1562, 'synset': 'shiner.n.04', 'name': 'shiner'}, {'id': 1563, 'synset': 'common_shiner.n.01', 'name': 'common_shiner'}, {'id': 1564, 'synset': 'roach.n.05', 'name': 'roach'}, {'id': 1565, 'synset': 'rudd.n.01', 'name': 'rudd'}, {'id': 1566, 'synset': 'minnow.n.01', 'name': 'minnow'}, {'id': 1567, 'synset': 'gudgeon.n.02', 'name': 'gudgeon'}, {'id': 1568, 'synset': 'crucian_carp.n.01', 'name': 'crucian_carp'}, {'id': 1569, 'synset': 'electric_eel.n.01', 'name': 'electric_eel'}, {'id': 1570, 'synset': 'catostomid.n.01', 'name': 'catostomid'}, {'id': 1571, 'synset': 'buffalo_fish.n.01', 'name': 'buffalo_fish'}, {'id': 1572, 'synset': 'black_buffalo.n.01', 'name': 'black_buffalo'}, {'id': 1573, 'synset': 'hog_sucker.n.01', 'name': 'hog_sucker'}, {'id': 1574, 'synset': 'redhorse.n.01', 'name': 'redhorse'}, {'id': 1575, 'synset': 'cyprinodont.n.01', 'name': 'cyprinodont'}, {'id': 1576, 'synset': 'killifish.n.01', 'name': 'killifish'}, {'id': 1577, 'synset': 'mummichog.n.01', 'name': 'mummichog'}, {'id': 1578, 'synset': 'striped_killifish.n.01', 'name': 'striped_killifish'}, {'id': 1579, 'synset': 'rivulus.n.01', 'name': 'rivulus'}, {'id': 1580, 'synset': 'flagfish.n.01', 'name': 'flagfish'}, {'id': 1581, 'synset': 'swordtail.n.01', 'name': 'swordtail'}, {'id': 1582, 'synset': 'guppy.n.01', 'name': 'guppy'}, {'id': 1583, 'synset': 'topminnow.n.01', 'name': 'topminnow'}, {'id': 1584, 'synset': 'mosquitofish.n.01', 'name': 'mosquitofish'}, {'id': 1585, 'synset': 'platy.n.01', 'name': 'platy'}, {'id': 1586, 'synset': 'mollie.n.01', 'name': 'mollie'}, {'id': 1587, 'synset': 'squirrelfish.n.02', 'name': 'squirrelfish'}, {'id': 1588, 'synset': 'reef_squirrelfish.n.01', 'name': 'reef_squirrelfish'}, {'id': 1589, 'synset': 'deepwater_squirrelfish.n.01', 'name': 'deepwater_squirrelfish'}, {'id': 1590, 'synset': 'holocentrus_ascensionis.n.01', 'name': 'Holocentrus_ascensionis'}, {'id': 1591, 'synset': 'soldierfish.n.01', 'name': 'soldierfish'}, {'id': 1592, 'synset': 'anomalops.n.01', 'name': 'anomalops'}, {'id': 1593, 'synset': 'flashlight_fish.n.01', 'name': 'flashlight_fish'}, {'id': 1594, 'synset': 'john_dory.n.01', 'name': 'John_Dory'}, {'id': 1595, 'synset': 'boarfish.n.02', 'name': 'boarfish'}, {'id': 1596, 'synset': 'boarfish.n.01', 'name': 'boarfish'}, {'id': 1597, 'synset': 'cornetfish.n.01', 'name': 'cornetfish'}, {'id': 1598, 'synset': 'stickleback.n.01', 'name': 
'stickleback'}, {'id': 1599, 'synset': 'three-spined_stickleback.n.01', 'name': 'three-spined_stickleback'}, {'id': 1600, 'synset': 'ten-spined_stickleback.n.01', 'name': 'ten-spined_stickleback'}, {'id': 1601, 'synset': 'pipefish.n.01', 'name': 'pipefish'}, {'id': 1602, 'synset': 'dwarf_pipefish.n.01', 'name': 'dwarf_pipefish'}, {'id': 1603, 'synset': 'deepwater_pipefish.n.01', 'name': 'deepwater_pipefish'}, {'id': 1604, 'synset': 'snipefish.n.01', 'name': 'snipefish'}, {'id': 1605, 'synset': 'shrimpfish.n.01', 'name': 'shrimpfish'}, {'id': 1606, 'synset': 'trumpetfish.n.01', 'name': 'trumpetfish'}, {'id': 1607, 'synset': 'pellicle.n.01', 'name': 'pellicle'}, {'id': 1608, 'synset': 'embryo.n.02', 'name': 'embryo'}, {'id': 1609, 'synset': 'fetus.n.01', 'name': 'fetus'}, {'id': 1610, 'synset': 'abortus.n.01', 'name': 'abortus'}, {'id': 1611, 'synset': 'spawn.n.01', 'name': 'spawn'}, {'id': 1612, 'synset': 'blastula.n.01', 'name': 'blastula'}, {'id': 1613, 'synset': 'blastocyst.n.01', 'name': 'blastocyst'}, {'id': 1614, 'synset': 'gastrula.n.01', 'name': 'gastrula'}, {'id': 1615, 'synset': 'morula.n.01', 'name': 'morula'}, {'id': 1616, 'synset': 'yolk.n.02', 'name': 'yolk'}, {'id': 1617, 'synset': 'chordate.n.01', 'name': 'chordate'}, {'id': 1618, 'synset': 'cephalochordate.n.01', 'name': 'cephalochordate'}, {'id': 1619, 'synset': 'lancelet.n.01', 'name': 'lancelet'}, {'id': 1620, 'synset': 'tunicate.n.01', 'name': 'tunicate'}, {'id': 1621, 'synset': 'ascidian.n.01', 'name': 'ascidian'}, {'id': 1622, 'synset': 'sea_squirt.n.01', 'name': 'sea_squirt'}, {'id': 1623, 'synset': 'salp.n.01', 'name': 'salp'}, {'id': 1624, 'synset': 'doliolum.n.01', 'name': 'doliolum'}, {'id': 1625, 'synset': 'larvacean.n.01', 'name': 'larvacean'}, {'id': 1626, 'synset': 'appendicularia.n.01', 'name': 'appendicularia'}, {'id': 1627, 'synset': 'ascidian_tadpole.n.01', 'name': 'ascidian_tadpole'}, {'id': 1628, 'synset': 'vertebrate.n.01', 'name': 'vertebrate'}, {'id': 1629, 'synset': 'amniota.n.01', 'name': 'Amniota'}, {'id': 1630, 'synset': 'amniote.n.01', 'name': 'amniote'}, {'id': 1631, 'synset': 'aquatic_vertebrate.n.01', 'name': 'aquatic_vertebrate'}, {'id': 1632, 'synset': 'jawless_vertebrate.n.01', 'name': 'jawless_vertebrate'}, {'id': 1633, 'synset': 'ostracoderm.n.01', 'name': 'ostracoderm'}, {'id': 1634, 'synset': 'heterostracan.n.01', 'name': 'heterostracan'}, {'id': 1635, 'synset': 'anaspid.n.01', 'name': 'anaspid'}, {'id': 1636, 'synset': 'conodont.n.02', 'name': 'conodont'}, {'id': 1637, 'synset': 'cyclostome.n.01', 'name': 'cyclostome'}, {'id': 1638, 'synset': 'lamprey.n.01', 'name': 'lamprey'}, {'id': 1639, 'synset': 'sea_lamprey.n.01', 'name': 'sea_lamprey'}, {'id': 1640, 'synset': 'hagfish.n.01', 'name': 'hagfish'}, {'id': 1641, 'synset': 'myxine_glutinosa.n.01', 'name': 'Myxine_glutinosa'}, {'id': 1642, 'synset': 'eptatretus.n.01', 'name': 'eptatretus'}, {'id': 1643, 'synset': 'gnathostome.n.01', 'name': 'gnathostome'}, {'id': 1644, 'synset': 'placoderm.n.01', 'name': 'placoderm'}, {'id': 1645, 'synset': 'cartilaginous_fish.n.01', 'name': 'cartilaginous_fish'}, {'id': 1646, 'synset': 'holocephalan.n.01', 'name': 'holocephalan'}, {'id': 1647, 'synset': 'chimaera.n.03', 'name': 'chimaera'}, {'id': 1648, 'synset': 'rabbitfish.n.01', 'name': 'rabbitfish'}, {'id': 1649, 'synset': 'elasmobranch.n.01', 'name': 'elasmobranch'}, {'id': 1650, 'synset': 'cow_shark.n.01', 'name': 'cow_shark'}, {'id': 1651, 'synset': 'mackerel_shark.n.01', 'name': 'mackerel_shark'}, {'id': 1652, 'synset': 'porbeagle.n.01', 
'name': 'porbeagle'}, {'id': 1653, 'synset': 'mako.n.01', 'name': 'mako'}, {'id': 1654, 'synset': 'shortfin_mako.n.01', 'name': 'shortfin_mako'}, {'id': 1655, 'synset': 'longfin_mako.n.01', 'name': 'longfin_mako'}, {'id': 1656, 'synset': 'bonito_shark.n.01', 'name': 'bonito_shark'}, {'id': 1657, 'synset': 'great_white_shark.n.01', 'name': 'great_white_shark'}, {'id': 1658, 'synset': 'basking_shark.n.01', 'name': 'basking_shark'}, {'id': 1659, 'synset': 'thresher.n.02', 'name': 'thresher'}, {'id': 1660, 'synset': 'carpet_shark.n.01', 'name': 'carpet_shark'}, {'id': 1661, 'synset': 'nurse_shark.n.01', 'name': 'nurse_shark'}, {'id': 1662, 'synset': 'sand_tiger.n.01', 'name': 'sand_tiger'}, {'id': 1663, 'synset': 'whale_shark.n.01', 'name': 'whale_shark'}, {'id': 1664, 'synset': 'requiem_shark.n.01', 'name': 'requiem_shark'}, {'id': 1665, 'synset': 'bull_shark.n.01', 'name': 'bull_shark'}, {'id': 1666, 'synset': 'sandbar_shark.n.02', 'name': 'sandbar_shark'}, {'id': 1667, 'synset': 'blacktip_shark.n.01', 'name': 'blacktip_shark'}, {'id': 1668, 'synset': 'whitetip_shark.n.02', 'name': 'whitetip_shark'}, {'id': 1669, 'synset': 'dusky_shark.n.01', 'name': 'dusky_shark'}, {'id': 1670, 'synset': 'lemon_shark.n.01', 'name': 'lemon_shark'}, {'id': 1671, 'synset': 'blue_shark.n.01', 'name': 'blue_shark'}, {'id': 1672, 'synset': 'tiger_shark.n.01', 'name': 'tiger_shark'}, {'id': 1673, 'synset': 'soupfin_shark.n.01', 'name': 'soupfin_shark'}, {'id': 1674, 'synset': 'dogfish.n.02', 'name': 'dogfish'}, {'id': 1675, 'synset': 'smooth_dogfish.n.01', 'name': 'smooth_dogfish'}, {'id': 1676, 'synset': 'smoothhound.n.01', 'name': 'smoothhound'}, {'id': 1677, 'synset': 'american_smooth_dogfish.n.01', 'name': 'American_smooth_dogfish'}, {'id': 1678, 'synset': 'florida_smoothhound.n.01', 'name': 'Florida_smoothhound'}, {'id': 1679, 'synset': 'whitetip_shark.n.01', 'name': 'whitetip_shark'}, {'id': 1680, 'synset': 'spiny_dogfish.n.01', 'name': 'spiny_dogfish'}, {'id': 1681, 'synset': 'atlantic_spiny_dogfish.n.01', 'name': 'Atlantic_spiny_dogfish'}, {'id': 1682, 'synset': 'pacific_spiny_dogfish.n.01', 'name': 'Pacific_spiny_dogfish'}, {'id': 1683, 'synset': 'hammerhead.n.03', 'name': 'hammerhead'}, {'id': 1684, 'synset': 'smooth_hammerhead.n.01', 'name': 'smooth_hammerhead'}, {'id': 1685, 'synset': 'smalleye_hammerhead.n.01', 'name': 'smalleye_hammerhead'}, {'id': 1686, 'synset': 'shovelhead.n.01', 'name': 'shovelhead'}, {'id': 1687, 'synset': 'angel_shark.n.01', 'name': 'angel_shark'}, {'id': 1688, 'synset': 'ray.n.07', 'name': 'ray'}, {'id': 1689, 'synset': 'electric_ray.n.01', 'name': 'electric_ray'}, {'id': 1690, 'synset': 'sawfish.n.01', 'name': 'sawfish'}, {'id': 1691, 'synset': 'smalltooth_sawfish.n.01', 'name': 'smalltooth_sawfish'}, {'id': 1692, 'synset': 'guitarfish.n.01', 'name': 'guitarfish'}, {'id': 1693, 'synset': 'stingray.n.01', 'name': 'stingray'}, {'id': 1694, 'synset': 'roughtail_stingray.n.01', 'name': 'roughtail_stingray'}, {'id': 1695, 'synset': 'butterfly_ray.n.01', 'name': 'butterfly_ray'}, {'id': 1696, 'synset': 'eagle_ray.n.01', 'name': 'eagle_ray'}, {'id': 1697, 'synset': 'spotted_eagle_ray.n.01', 'name': 'spotted_eagle_ray'}, {'id': 1698, 'synset': 'cownose_ray.n.01', 'name': 'cownose_ray'}, {'id': 1699, 'synset': 'manta.n.02', 'name': 'manta'}, {'id': 1700, 'synset': 'atlantic_manta.n.01', 'name': 'Atlantic_manta'}, {'id': 1701, 'synset': 'devil_ray.n.01', 'name': 'devil_ray'}, {'id': 1702, 'synset': 'skate.n.02', 'name': 'skate'}, {'id': 1703, 'synset': 'grey_skate.n.01', 'name': 
'grey_skate'}, {'id': 1704, 'synset': 'little_skate.n.01', 'name': 'little_skate'}, {'id': 1705, 'synset': 'thorny_skate.n.01', 'name': 'thorny_skate'}, {'id': 1706, 'synset': 'barndoor_skate.n.01', 'name': 'barndoor_skate'}, {'id': 1707, 'synset': 'dickeybird.n.01', 'name': 'dickeybird'}, {'id': 1708, 'synset': 'fledgling.n.02', 'name': 'fledgling'}, {'id': 1709, 'synset': 'nestling.n.01', 'name': 'nestling'}, {'id': 1710, 'synset': 'cock.n.05', 'name': 'cock'}, {'id': 1711, 'synset': 'gamecock.n.01', 'name': 'gamecock'}, {'id': 1712, 'synset': 'hen.n.02', 'name': 'hen'}, {'id': 1713, 'synset': 'nester.n.02', 'name': 'nester'}, {'id': 1714, 'synset': 'night_bird.n.01', 'name': 'night_bird'}, {'id': 1715, 'synset': 'night_raven.n.02', 'name': 'night_raven'}, {'id': 1716, 'synset': 'bird_of_passage.n.02', 'name': 'bird_of_passage'}, {'id': 1717, 'synset': 'archaeopteryx.n.01', 'name': 'archaeopteryx'}, {'id': 1718, 'synset': 'archaeornis.n.01', 'name': 'archaeornis'}, {'id': 1719, 'synset': 'ratite.n.01', 'name': 'ratite'}, {'id': 1720, 'synset': 'carinate.n.01', 'name': 'carinate'}, {'id': 1721, 'synset': 'cassowary.n.01', 'name': 'cassowary'}, {'id': 1722, 'synset': 'emu.n.02', 'name': 'emu'}, {'id': 1723, 'synset': 'kiwi.n.04', 'name': 'kiwi'}, {'id': 1724, 'synset': 'rhea.n.03', 'name': 'rhea'}, {'id': 1725, 'synset': 'rhea.n.02', 'name': 'rhea'}, {'id': 1726, 'synset': 'elephant_bird.n.01', 'name': 'elephant_bird'}, {'id': 1727, 'synset': 'moa.n.01', 'name': 'moa'}, {'id': 1728, 'synset': 'passerine.n.01', 'name': 'passerine'}, {'id': 1729, 'synset': 'nonpasserine_bird.n.01', 'name': 'nonpasserine_bird'}, {'id': 1730, 'synset': 'oscine.n.01', 'name': 'oscine'}, {'id': 1731, 'synset': 'songbird.n.01', 'name': 'songbird'}, {'id': 1732, 'synset': 'honey_eater.n.01', 'name': 'honey_eater'}, {'id': 1733, 'synset': 'accentor.n.01', 'name': 'accentor'}, {'id': 1734, 'synset': 'hedge_sparrow.n.01', 'name': 'hedge_sparrow'}, {'id': 1735, 'synset': 'lark.n.03', 'name': 'lark'}, {'id': 1736, 'synset': 'skylark.n.01', 'name': 'skylark'}, {'id': 1737, 'synset': 'wagtail.n.01', 'name': 'wagtail'}, {'id': 1738, 'synset': 'pipit.n.01', 'name': 'pipit'}, {'id': 1739, 'synset': 'meadow_pipit.n.01', 'name': 'meadow_pipit'}, {'id': 1740, 'synset': 'finch.n.01', 'name': 'finch'}, {'id': 1741, 'synset': 'chaffinch.n.01', 'name': 'chaffinch'}, {'id': 1742, 'synset': 'brambling.n.01', 'name': 'brambling'}, {'id': 1743, 'synset': 'goldfinch.n.02', 'name': 'goldfinch'}, {'id': 1744, 'synset': 'linnet.n.02', 'name': 'linnet'}, {'id': 1745, 'synset': 'siskin.n.01', 'name': 'siskin'}, {'id': 1746, 'synset': 'red_siskin.n.01', 'name': 'red_siskin'}, {'id': 1747, 'synset': 'redpoll.n.02', 'name': 'redpoll'}, {'id': 1748, 'synset': 'redpoll.n.01', 'name': 'redpoll'}, {'id': 1749, 'synset': 'new_world_goldfinch.n.01', 'name': 'New_World_goldfinch'}, {'id': 1750, 'synset': 'pine_siskin.n.01', 'name': 'pine_siskin'}, {'id': 1751, 'synset': 'house_finch.n.01', 'name': 'house_finch'}, {'id': 1752, 'synset': 'purple_finch.n.01', 'name': 'purple_finch'}, {'id': 1753, 'synset': 'canary.n.04', 'name': 'canary'}, {'id': 1754, 'synset': 'common_canary.n.01', 'name': 'common_canary'}, {'id': 1755, 'synset': 'serin.n.01', 'name': 'serin'}, {'id': 1756, 'synset': 'crossbill.n.01', 'name': 'crossbill'}, {'id': 1757, 'synset': 'bullfinch.n.02', 'name': 'bullfinch'}, {'id': 1758, 'synset': 'junco.n.01', 'name': 'junco'}, {'id': 1759, 'synset': 'dark-eyed_junco.n.01', 'name': 'dark-eyed_junco'}, {'id': 1760, 'synset': 
'new_world_sparrow.n.01', 'name': 'New_World_sparrow'}, {'id': 1761, 'synset': 'vesper_sparrow.n.01', 'name': 'vesper_sparrow'}, {'id': 1762, 'synset': 'white-throated_sparrow.n.01', 'name': 'white-throated_sparrow'}, {'id': 1763, 'synset': 'white-crowned_sparrow.n.01', 'name': 'white-crowned_sparrow'}, {'id': 1764, 'synset': 'chipping_sparrow.n.01', 'name': 'chipping_sparrow'}, {'id': 1765, 'synset': 'field_sparrow.n.01', 'name': 'field_sparrow'}, {'id': 1766, 'synset': 'tree_sparrow.n.02', 'name': 'tree_sparrow'}, {'id': 1767, 'synset': 'song_sparrow.n.01', 'name': 'song_sparrow'}, {'id': 1768, 'synset': 'swamp_sparrow.n.01', 'name': 'swamp_sparrow'}, {'id': 1769, 'synset': 'bunting.n.02', 'name': 'bunting'}, {'id': 1770, 'synset': 'indigo_bunting.n.01', 'name': 'indigo_bunting'}, {'id': 1771, 'synset': 'ortolan.n.01', 'name': 'ortolan'}, {'id': 1772, 'synset': 'reed_bunting.n.01', 'name': 'reed_bunting'}, {'id': 1773, 'synset': 'yellowhammer.n.02', 'name': 'yellowhammer'}, {'id': 1774, 'synset': 'yellow-breasted_bunting.n.01', 'name': 'yellow-breasted_bunting'}, {'id': 1775, 'synset': 'snow_bunting.n.01', 'name': 'snow_bunting'}, {'id': 1776, 'synset': 'honeycreeper.n.02', 'name': 'honeycreeper'}, {'id': 1777, 'synset': 'banana_quit.n.01', 'name': 'banana_quit'}, {'id': 1778, 'synset': 'sparrow.n.01', 'name': 'sparrow'}, {'id': 1779, 'synset': 'english_sparrow.n.01', 'name': 'English_sparrow'}, {'id': 1780, 'synset': 'tree_sparrow.n.01', 'name': 'tree_sparrow'}, {'id': 1781, 'synset': 'grosbeak.n.01', 'name': 'grosbeak'}, {'id': 1782, 'synset': 'evening_grosbeak.n.01', 'name': 'evening_grosbeak'}, {'id': 1783, 'synset': 'hawfinch.n.01', 'name': 'hawfinch'}, {'id': 1784, 'synset': 'pine_grosbeak.n.01', 'name': 'pine_grosbeak'}, {'id': 1785, 'synset': 'cardinal.n.04', 'name': 'cardinal'}, {'id': 1786, 'synset': 'pyrrhuloxia.n.01', 'name': 'pyrrhuloxia'}, {'id': 1787, 'synset': 'towhee.n.01', 'name': 'towhee'}, {'id': 1788, 'synset': 'chewink.n.01', 'name': 'chewink'}, {'id': 1789, 'synset': 'green-tailed_towhee.n.01', 'name': 'green-tailed_towhee'}, {'id': 1790, 'synset': 'weaver.n.02', 'name': 'weaver'}, {'id': 1791, 'synset': 'baya.n.01', 'name': 'baya'}, {'id': 1792, 'synset': 'whydah.n.01', 'name': 'whydah'}, {'id': 1793, 'synset': 'java_sparrow.n.01', 'name': 'Java_sparrow'}, {'id': 1794, 'synset': 'avadavat.n.01', 'name': 'avadavat'}, {'id': 1795, 'synset': 'grassfinch.n.01', 'name': 'grassfinch'}, {'id': 1796, 'synset': 'zebra_finch.n.01', 'name': 'zebra_finch'}, {'id': 1797, 'synset': 'honeycreeper.n.01', 'name': 'honeycreeper'}, {'id': 1798, 'synset': 'lyrebird.n.01', 'name': 'lyrebird'}, {'id': 1799, 'synset': 'scrubbird.n.01', 'name': 'scrubbird'}, {'id': 1800, 'synset': 'broadbill.n.04', 'name': 'broadbill'}, {'id': 1801, 'synset': 'tyrannid.n.01', 'name': 'tyrannid'}, {'id': 1802, 'synset': 'new_world_flycatcher.n.01', 'name': 'New_World_flycatcher'}, {'id': 1803, 'synset': 'kingbird.n.01', 'name': 'kingbird'}, {'id': 1804, 'synset': 'arkansas_kingbird.n.01', 'name': 'Arkansas_kingbird'}, {'id': 1805, 'synset': "cassin's_kingbird.n.01", 'name': "Cassin's_kingbird"}, {'id': 1806, 'synset': 'eastern_kingbird.n.01', 'name': 'eastern_kingbird'}, {'id': 1807, 'synset': 'grey_kingbird.n.01', 'name': 'grey_kingbird'}, {'id': 1808, 'synset': 'pewee.n.01', 'name': 'pewee'}, {'id': 1809, 'synset': 'western_wood_pewee.n.01', 'name': 'western_wood_pewee'}, {'id': 1810, 'synset': 'phoebe.n.03', 'name': 'phoebe'}, {'id': 1811, 'synset': 'vermillion_flycatcher.n.01', 'name': 
'vermillion_flycatcher'}, {'id': 1812, 'synset': 'cotinga.n.01', 'name': 'cotinga'}, {'id': 1813, 'synset': 'cock_of_the_rock.n.02', 'name': 'cock_of_the_rock'}, {'id': 1814, 'synset': 'cock_of_the_rock.n.01', 'name': 'cock_of_the_rock'}, {'id': 1815, 'synset': 'manakin.n.03', 'name': 'manakin'}, {'id': 1816, 'synset': 'bellbird.n.01', 'name': 'bellbird'}, {'id': 1817, 'synset': 'umbrella_bird.n.01', 'name': 'umbrella_bird'}, {'id': 1818, 'synset': 'ovenbird.n.02', 'name': 'ovenbird'}, {'id': 1819, 'synset': 'antbird.n.01', 'name': 'antbird'}, {'id': 1820, 'synset': 'ant_thrush.n.01', 'name': 'ant_thrush'}, {'id': 1821, 'synset': 'ant_shrike.n.01', 'name': 'ant_shrike'}, {'id': 1822, 'synset': 'spotted_antbird.n.01', 'name': 'spotted_antbird'}, {'id': 1823, 'synset': 'woodhewer.n.01', 'name': 'woodhewer'}, {'id': 1824, 'synset': 'pitta.n.01', 'name': 'pitta'}, {'id': 1825, 'synset': 'scissortail.n.01', 'name': 'scissortail'}, {'id': 1826, 'synset': 'old_world_flycatcher.n.01', 'name': 'Old_World_flycatcher'}, {'id': 1827, 'synset': 'spotted_flycatcher.n.01', 'name': 'spotted_flycatcher'}, {'id': 1828, 'synset': 'thickhead.n.01', 'name': 'thickhead'}, {'id': 1829, 'synset': 'thrush.n.03', 'name': 'thrush'}, {'id': 1830, 'synset': 'missel_thrush.n.01', 'name': 'missel_thrush'}, {'id': 1831, 'synset': 'song_thrush.n.01', 'name': 'song_thrush'}, {'id': 1832, 'synset': 'fieldfare.n.01', 'name': 'fieldfare'}, {'id': 1833, 'synset': 'redwing.n.02', 'name': 'redwing'}, {'id': 1834, 'synset': 'blackbird.n.02', 'name': 'blackbird'}, {'id': 1835, 'synset': 'ring_ouzel.n.01', 'name': 'ring_ouzel'}, {'id': 1836, 'synset': 'robin.n.02', 'name': 'robin'}, {'id': 1837, 'synset': 'clay-colored_robin.n.01', 'name': 'clay-colored_robin'}, {'id': 1838, 'synset': 'hermit_thrush.n.01', 'name': 'hermit_thrush'}, {'id': 1839, 'synset': 'veery.n.01', 'name': 'veery'}, {'id': 1840, 'synset': 'wood_thrush.n.01', 'name': 'wood_thrush'}, {'id': 1841, 'synset': 'nightingale.n.01', 'name': 'nightingale'}, {'id': 1842, 'synset': 'thrush_nightingale.n.01', 'name': 'thrush_nightingale'}, {'id': 1843, 'synset': 'bulbul.n.01', 'name': 'bulbul'}, {'id': 1844, 'synset': 'old_world_chat.n.01', 'name': 'Old_World_chat'}, {'id': 1845, 'synset': 'stonechat.n.01', 'name': 'stonechat'}, {'id': 1846, 'synset': 'whinchat.n.01', 'name': 'whinchat'}, {'id': 1847, 'synset': 'solitaire.n.03', 'name': 'solitaire'}, {'id': 1848, 'synset': 'redstart.n.02', 'name': 'redstart'}, {'id': 1849, 'synset': 'wheatear.n.01', 'name': 'wheatear'}, {'id': 1850, 'synset': 'bluebird.n.02', 'name': 'bluebird'}, {'id': 1851, 'synset': 'robin.n.01', 'name': 'robin'}, {'id': 1852, 'synset': 'bluethroat.n.01', 'name': 'bluethroat'}, {'id': 1853, 'synset': 'warbler.n.02', 'name': 'warbler'}, {'id': 1854, 'synset': 'gnatcatcher.n.01', 'name': 'gnatcatcher'}, {'id': 1855, 'synset': 'kinglet.n.01', 'name': 'kinglet'}, {'id': 1856, 'synset': 'goldcrest.n.01', 'name': 'goldcrest'}, {'id': 1857, 'synset': 'gold-crowned_kinglet.n.01', 'name': 'gold-crowned_kinglet'}, {'id': 1858, 'synset': 'ruby-crowned_kinglet.n.01', 'name': 'ruby-crowned_kinglet'}, {'id': 1859, 'synset': 'old_world_warbler.n.01', 'name': 'Old_World_warbler'}, {'id': 1860, 'synset': 'blackcap.n.04', 'name': 'blackcap'}, {'id': 1861, 'synset': 'greater_whitethroat.n.01', 'name': 'greater_whitethroat'}, {'id': 1862, 'synset': 'lesser_whitethroat.n.01', 'name': 'lesser_whitethroat'}, {'id': 1863, 'synset': 'wood_warbler.n.02', 'name': 'wood_warbler'}, {'id': 1864, 'synset': 'sedge_warbler.n.01', 'name': 
'sedge_warbler'}, {'id': 1865, 'synset': 'wren_warbler.n.01', 'name': 'wren_warbler'}, {'id': 1866, 'synset': 'tailorbird.n.01', 'name': 'tailorbird'}, {'id': 1867, 'synset': 'babbler.n.02', 'name': 'babbler'}, {'id': 1868, 'synset': 'new_world_warbler.n.01', 'name': 'New_World_warbler'}, {'id': 1869, 'synset': 'parula_warbler.n.01', 'name': 'parula_warbler'}, {'id': 1870, 'synset': "wilson's_warbler.n.01", 'name': "Wilson's_warbler"}, {'id': 1871, 'synset': 'flycatching_warbler.n.01', 'name': 'flycatching_warbler'}, {'id': 1872, 'synset': 'american_redstart.n.01', 'name': 'American_redstart'}, {'id': 1873, 'synset': 'cape_may_warbler.n.01', 'name': 'Cape_May_warbler'}, {'id': 1874, 'synset': 'yellow_warbler.n.01', 'name': 'yellow_warbler'}, {'id': 1875, 'synset': 'blackburn.n.01', 'name': 'Blackburn'}, {'id': 1876, 'synset': "audubon's_warbler.n.01", 'name': "Audubon's_warbler"}, {'id': 1877, 'synset': 'myrtle_warbler.n.01', 'name': 'myrtle_warbler'}, {'id': 1878, 'synset': 'blackpoll.n.01', 'name': 'blackpoll'}, {'id': 1879, 'synset': 'new_world_chat.n.01', 'name': 'New_World_chat'}, {'id': 1880, 'synset': 'yellow-breasted_chat.n.01', 'name': 'yellow-breasted_chat'}, {'id': 1881, 'synset': 'ovenbird.n.01', 'name': 'ovenbird'}, {'id': 1882, 'synset': 'water_thrush.n.01', 'name': 'water_thrush'}, {'id': 1883, 'synset': 'yellowthroat.n.01', 'name': 'yellowthroat'}, {'id': 1884, 'synset': 'common_yellowthroat.n.01', 'name': 'common_yellowthroat'}, {'id': 1885, 'synset': 'riflebird.n.01', 'name': 'riflebird'}, {'id': 1886, 'synset': 'new_world_oriole.n.01', 'name': 'New_World_oriole'}, {'id': 1887, 'synset': 'northern_oriole.n.01', 'name': 'northern_oriole'}, {'id': 1888, 'synset': 'baltimore_oriole.n.01', 'name': 'Baltimore_oriole'}, {'id': 1889, 'synset': "bullock's_oriole.n.01", 'name': "Bullock's_oriole"}, {'id': 1890, 'synset': 'orchard_oriole.n.01', 'name': 'orchard_oriole'}, {'id': 1891, 'synset': 'meadowlark.n.01', 'name': 'meadowlark'}, {'id': 1892, 'synset': 'eastern_meadowlark.n.01', 'name': 'eastern_meadowlark'}, {'id': 1893, 'synset': 'western_meadowlark.n.01', 'name': 'western_meadowlark'}, {'id': 1894, 'synset': 'cacique.n.01', 'name': 'cacique'}, {'id': 1895, 'synset': 'bobolink.n.01', 'name': 'bobolink'}, {'id': 1896, 'synset': 'new_world_blackbird.n.01', 'name': 'New_World_blackbird'}, {'id': 1897, 'synset': 'grackle.n.02', 'name': 'grackle'}, {'id': 1898, 'synset': 'purple_grackle.n.01', 'name': 'purple_grackle'}, {'id': 1899, 'synset': 'rusty_blackbird.n.01', 'name': 'rusty_blackbird'}, {'id': 1900, 'synset': 'cowbird.n.01', 'name': 'cowbird'}, {'id': 1901, 'synset': 'red-winged_blackbird.n.01', 'name': 'red-winged_blackbird'}, {'id': 1902, 'synset': 'old_world_oriole.n.01', 'name': 'Old_World_oriole'}, {'id': 1903, 'synset': 'golden_oriole.n.01', 'name': 'golden_oriole'}, {'id': 1904, 'synset': 'fig-bird.n.01', 'name': 'fig-bird'}, {'id': 1905, 'synset': 'starling.n.01', 'name': 'starling'}, {'id': 1906, 'synset': 'common_starling.n.01', 'name': 'common_starling'}, {'id': 1907, 'synset': 'rose-colored_starling.n.01', 'name': 'rose-colored_starling'}, {'id': 1908, 'synset': 'myna.n.01', 'name': 'myna'}, {'id': 1909, 'synset': 'crested_myna.n.01', 'name': 'crested_myna'}, {'id': 1910, 'synset': 'hill_myna.n.01', 'name': 'hill_myna'}, {'id': 1911, 'synset': 'corvine_bird.n.01', 'name': 'corvine_bird'}, {'id': 1912, 'synset': 'american_crow.n.01', 'name': 'American_crow'}, {'id': 1913, 'synset': 'raven.n.01', 'name': 'raven'}, {'id': 1914, 'synset': 'rook.n.02', 'name': 
'rook'}, {'id': 1915, 'synset': 'jackdaw.n.01', 'name': 'jackdaw'}, {'id': 1916, 'synset': 'chough.n.01', 'name': 'chough'}, {'id': 1917, 'synset': 'jay.n.02', 'name': 'jay'}, {'id': 1918, 'synset': 'old_world_jay.n.01', 'name': 'Old_World_jay'}, {'id': 1919, 'synset': 'common_european_jay.n.01', 'name': 'common_European_jay'}, {'id': 1920, 'synset': 'new_world_jay.n.01', 'name': 'New_World_jay'}, {'id': 1921, 'synset': 'blue_jay.n.01', 'name': 'blue_jay'}, {'id': 1922, 'synset': 'canada_jay.n.01', 'name': 'Canada_jay'}, {'id': 1923, 'synset': 'rocky_mountain_jay.n.01', 'name': 'Rocky_Mountain_jay'}, {'id': 1924, 'synset': 'nutcracker.n.03', 'name': 'nutcracker'}, {'id': 1925, 'synset': 'common_nutcracker.n.01', 'name': 'common_nutcracker'}, {'id': 1926, 'synset': "clark's_nutcracker.n.01", 'name': "Clark's_nutcracker"}, {'id': 1927, 'synset': 'magpie.n.01', 'name': 'magpie'}, {'id': 1928, 'synset': 'european_magpie.n.01', 'name': 'European_magpie'}, {'id': 1929, 'synset': 'american_magpie.n.01', 'name': 'American_magpie'}, {'id': 1930, 'synset': 'australian_magpie.n.01', 'name': 'Australian_magpie'}, {'id': 1931, 'synset': 'butcherbird.n.02', 'name': 'butcherbird'}, {'id': 1932, 'synset': 'currawong.n.01', 'name': 'currawong'}, {'id': 1933, 'synset': 'piping_crow.n.01', 'name': 'piping_crow'}, {'id': 1934, 'synset': 'wren.n.02', 'name': 'wren'}, {'id': 1935, 'synset': 'winter_wren.n.01', 'name': 'winter_wren'}, {'id': 1936, 'synset': 'house_wren.n.01', 'name': 'house_wren'}, {'id': 1937, 'synset': 'marsh_wren.n.01', 'name': 'marsh_wren'}, {'id': 1938, 'synset': 'long-billed_marsh_wren.n.01', 'name': 'long-billed_marsh_wren'}, {'id': 1939, 'synset': 'sedge_wren.n.01', 'name': 'sedge_wren'}, {'id': 1940, 'synset': 'rock_wren.n.02', 'name': 'rock_wren'}, {'id': 1941, 'synset': 'carolina_wren.n.01', 'name': 'Carolina_wren'}, {'id': 1942, 'synset': 'cactus_wren.n.01', 'name': 'cactus_wren'}, {'id': 1943, 'synset': 'mockingbird.n.01', 'name': 'mockingbird'}, {'id': 1944, 'synset': 'blue_mockingbird.n.01', 'name': 'blue_mockingbird'}, {'id': 1945, 'synset': 'catbird.n.02', 'name': 'catbird'}, {'id': 1946, 'synset': 'thrasher.n.02', 'name': 'thrasher'}, {'id': 1947, 'synset': 'brown_thrasher.n.01', 'name': 'brown_thrasher'}, {'id': 1948, 'synset': 'new_zealand_wren.n.01', 'name': 'New_Zealand_wren'}, {'id': 1949, 'synset': 'rock_wren.n.01', 'name': 'rock_wren'}, {'id': 1950, 'synset': 'rifleman_bird.n.01', 'name': 'rifleman_bird'}, {'id': 1951, 'synset': 'creeper.n.03', 'name': 'creeper'}, {'id': 1952, 'synset': 'brown_creeper.n.01', 'name': 'brown_creeper'}, {'id': 1953, 'synset': 'european_creeper.n.01', 'name': 'European_creeper'}, {'id': 1954, 'synset': 'wall_creeper.n.01', 'name': 'wall_creeper'}, {'id': 1955, 'synset': 'european_nuthatch.n.01', 'name': 'European_nuthatch'}, {'id': 1956, 'synset': 'red-breasted_nuthatch.n.01', 'name': 'red-breasted_nuthatch'}, {'id': 1957, 'synset': 'white-breasted_nuthatch.n.01', 'name': 'white-breasted_nuthatch'}, {'id': 1958, 'synset': 'titmouse.n.01', 'name': 'titmouse'}, {'id': 1959, 'synset': 'chickadee.n.01', 'name': 'chickadee'}, {'id': 1960, 'synset': 'black-capped_chickadee.n.01', 'name': 'black-capped_chickadee'}, {'id': 1961, 'synset': 'tufted_titmouse.n.01', 'name': 'tufted_titmouse'}, {'id': 1962, 'synset': 'carolina_chickadee.n.01', 'name': 'Carolina_chickadee'}, {'id': 1963, 'synset': 'blue_tit.n.01', 'name': 'blue_tit'}, {'id': 1964, 'synset': 'bushtit.n.01', 'name': 'bushtit'}, {'id': 1965, 'synset': 'wren-tit.n.01', 'name': 'wren-tit'}, 
{'id': 1966, 'synset': 'verdin.n.01', 'name': 'verdin'}, {'id': 1967, 'synset': 'fairy_bluebird.n.01', 'name': 'fairy_bluebird'}, {'id': 1968, 'synset': 'swallow.n.03', 'name': 'swallow'}, {'id': 1969, 'synset': 'barn_swallow.n.01', 'name': 'barn_swallow'}, {'id': 1970, 'synset': 'cliff_swallow.n.01', 'name': 'cliff_swallow'}, {'id': 1971, 'synset': 'tree_swallow.n.02', 'name': 'tree_swallow'}, {'id': 1972, 'synset': 'white-bellied_swallow.n.01', 'name': 'white-bellied_swallow'}, {'id': 1973, 'synset': 'martin.n.05', 'name': 'martin'}, {'id': 1974, 'synset': 'house_martin.n.01', 'name': 'house_martin'}, {'id': 1975, 'synset': 'bank_martin.n.01', 'name': 'bank_martin'}, {'id': 1976, 'synset': 'purple_martin.n.01', 'name': 'purple_martin'}, {'id': 1977, 'synset': 'wood_swallow.n.01', 'name': 'wood_swallow'}, {'id': 1978, 'synset': 'tanager.n.01', 'name': 'tanager'}, {'id': 1979, 'synset': 'scarlet_tanager.n.01', 'name': 'scarlet_tanager'}, {'id': 1980, 'synset': 'western_tanager.n.01', 'name': 'western_tanager'}, {'id': 1981, 'synset': 'summer_tanager.n.01', 'name': 'summer_tanager'}, {'id': 1982, 'synset': 'hepatic_tanager.n.01', 'name': 'hepatic_tanager'}, {'id': 1983, 'synset': 'shrike.n.01', 'name': 'shrike'}, {'id': 1984, 'synset': 'butcherbird.n.01', 'name': 'butcherbird'}, {'id': 1985, 'synset': 'european_shrike.n.01', 'name': 'European_shrike'}, {'id': 1986, 'synset': 'northern_shrike.n.01', 'name': 'northern_shrike'}, {'id': 1987, 'synset': 'white-rumped_shrike.n.01', 'name': 'white-rumped_shrike'}, {'id': 1988, 'synset': 'loggerhead_shrike.n.01', 'name': 'loggerhead_shrike'}, {'id': 1989, 'synset': 'migrant_shrike.n.01', 'name': 'migrant_shrike'}, {'id': 1990, 'synset': 'bush_shrike.n.01', 'name': 'bush_shrike'}, {'id': 1991, 'synset': 'black-fronted_bush_shrike.n.01', 'name': 'black-fronted_bush_shrike'}, {'id': 1992, 'synset': 'bowerbird.n.01', 'name': 'bowerbird'}, {'id': 1993, 'synset': 'satin_bowerbird.n.01', 'name': 'satin_bowerbird'}, {'id': 1994, 'synset': 'great_bowerbird.n.01', 'name': 'great_bowerbird'}, {'id': 1995, 'synset': 'water_ouzel.n.01', 'name': 'water_ouzel'}, {'id': 1996, 'synset': 'european_water_ouzel.n.01', 'name': 'European_water_ouzel'}, {'id': 1997, 'synset': 'american_water_ouzel.n.01', 'name': 'American_water_ouzel'}, {'id': 1998, 'synset': 'vireo.n.01', 'name': 'vireo'}, {'id': 1999, 'synset': 'red-eyed_vireo.n.01', 'name': 'red-eyed_vireo'}, {'id': 2000, 'synset': 'solitary_vireo.n.01', 'name': 'solitary_vireo'}, {'id': 2001, 'synset': 'blue-headed_vireo.n.01', 'name': 'blue-headed_vireo'}, {'id': 2002, 'synset': 'waxwing.n.01', 'name': 'waxwing'}, {'id': 2003, 'synset': 'cedar_waxwing.n.01', 'name': 'cedar_waxwing'}, {'id': 2004, 'synset': 'bohemian_waxwing.n.01', 'name': 'Bohemian_waxwing'}, {'id': 2005, 'synset': 'bird_of_prey.n.01', 'name': 'bird_of_prey'}, {'id': 2006, 'synset': 'accipitriformes.n.01', 'name': 'Accipitriformes'}, {'id': 2007, 'synset': 'hawk.n.01', 'name': 'hawk'}, {'id': 2008, 'synset': 'eyas.n.01', 'name': 'eyas'}, {'id': 2009, 'synset': 'tiercel.n.01', 'name': 'tiercel'}, {'id': 2010, 'synset': 'goshawk.n.01', 'name': 'goshawk'}, {'id': 2011, 'synset': 'sparrow_hawk.n.02', 'name': 'sparrow_hawk'}, {'id': 2012, 'synset': "cooper's_hawk.n.01", 'name': "Cooper's_hawk"}, {'id': 2013, 'synset': 'chicken_hawk.n.01', 'name': 'chicken_hawk'}, {'id': 2014, 'synset': 'buteonine.n.01', 'name': 'buteonine'}, {'id': 2015, 'synset': 'redtail.n.01', 'name': 'redtail'}, {'id': 2016, 'synset': 'rough-legged_hawk.n.01', 'name': 
'rough-legged_hawk'}, {'id': 2017, 'synset': 'red-shouldered_hawk.n.01', 'name': 'red-shouldered_hawk'}, {'id': 2018, 'synset': 'buzzard.n.02', 'name': 'buzzard'}, {'id': 2019, 'synset': 'honey_buzzard.n.01', 'name': 'honey_buzzard'}, {'id': 2020, 'synset': 'kite.n.04', 'name': 'kite'}, {'id': 2021, 'synset': 'black_kite.n.01', 'name': 'black_kite'}, {'id': 2022, 'synset': 'swallow-tailed_kite.n.01', 'name': 'swallow-tailed_kite'}, {'id': 2023, 'synset': 'white-tailed_kite.n.01', 'name': 'white-tailed_kite'}, {'id': 2024, 'synset': 'harrier.n.03', 'name': 'harrier'}, {'id': 2025, 'synset': 'marsh_harrier.n.01', 'name': 'marsh_harrier'}, {'id': 2026, 'synset': "montagu's_harrier.n.01", 'name': "Montagu's_harrier"}, {'id': 2027, 'synset': 'marsh_hawk.n.01', 'name': 'marsh_hawk'}, {'id': 2028, 'synset': 'harrier_eagle.n.01', 'name': 'harrier_eagle'}, {'id': 2029, 'synset': 'peregrine.n.01', 'name': 'peregrine'}, {'id': 2030, 'synset': 'falcon-gentle.n.01', 'name': 'falcon-gentle'}, {'id': 2031, 'synset': 'gyrfalcon.n.01', 'name': 'gyrfalcon'}, {'id': 2032, 'synset': 'kestrel.n.02', 'name': 'kestrel'}, {'id': 2033, 'synset': 'sparrow_hawk.n.01', 'name': 'sparrow_hawk'}, {'id': 2034, 'synset': 'pigeon_hawk.n.01', 'name': 'pigeon_hawk'}, {'id': 2035, 'synset': 'hobby.n.03', 'name': 'hobby'}, {'id': 2036, 'synset': 'caracara.n.01', 'name': 'caracara'}, {'id': 2037, 'synset': "audubon's_caracara.n.01", 'name': "Audubon's_caracara"}, {'id': 2038, 'synset': 'carancha.n.01', 'name': 'carancha'}, {'id': 2039, 'synset': 'young_bird.n.01', 'name': 'young_bird'}, {'id': 2040, 'synset': 'eaglet.n.01', 'name': 'eaglet'}, {'id': 2041, 'synset': 'harpy.n.04', 'name': 'harpy'}, {'id': 2042, 'synset': 'golden_eagle.n.01', 'name': 'golden_eagle'}, {'id': 2043, 'synset': 'tawny_eagle.n.01', 'name': 'tawny_eagle'}, {'id': 2044, 'synset': 'bald_eagle.n.01', 'name': 'bald_eagle'}, {'id': 2045, 'synset': 'sea_eagle.n.02', 'name': 'sea_eagle'}, {'id': 2046, 'synset': 'kamchatkan_sea_eagle.n.01', 'name': 'Kamchatkan_sea_eagle'}, {'id': 2047, 'synset': 'ern.n.01', 'name': 'ern'}, {'id': 2048, 'synset': 'fishing_eagle.n.01', 'name': 'fishing_eagle'}, {'id': 2049, 'synset': 'osprey.n.01', 'name': 'osprey'}, {'id': 2050, 'synset': 'aegypiidae.n.01', 'name': 'Aegypiidae'}, {'id': 2051, 'synset': 'old_world_vulture.n.01', 'name': 'Old_World_vulture'}, {'id': 2052, 'synset': 'griffon_vulture.n.01', 'name': 'griffon_vulture'}, {'id': 2053, 'synset': 'bearded_vulture.n.01', 'name': 'bearded_vulture'}, {'id': 2054, 'synset': 'egyptian_vulture.n.01', 'name': 'Egyptian_vulture'}, {'id': 2055, 'synset': 'black_vulture.n.02', 'name': 'black_vulture'}, {'id': 2056, 'synset': 'secretary_bird.n.01', 'name': 'secretary_bird'}, {'id': 2057, 'synset': 'new_world_vulture.n.01', 'name': 'New_World_vulture'}, {'id': 2058, 'synset': 'buzzard.n.01', 'name': 'buzzard'}, {'id': 2059, 'synset': 'condor.n.01', 'name': 'condor'}, {'id': 2060, 'synset': 'andean_condor.n.01', 'name': 'Andean_condor'}, {'id': 2061, 'synset': 'california_condor.n.01', 'name': 'California_condor'}, {'id': 2062, 'synset': 'black_vulture.n.01', 'name': 'black_vulture'}, {'id': 2063, 'synset': 'king_vulture.n.01', 'name': 'king_vulture'}, {'id': 2064, 'synset': 'owlet.n.01', 'name': 'owlet'}, {'id': 2065, 'synset': 'little_owl.n.01', 'name': 'little_owl'}, {'id': 2066, 'synset': 'horned_owl.n.01', 'name': 'horned_owl'}, {'id': 2067, 'synset': 'great_horned_owl.n.01', 'name': 'great_horned_owl'}, {'id': 2068, 'synset': 'great_grey_owl.n.01', 'name': 'great_grey_owl'}, 
{'id': 2069, 'synset': 'tawny_owl.n.01', 'name': 'tawny_owl'}, {'id': 2070, 'synset': 'barred_owl.n.01', 'name': 'barred_owl'}, {'id': 2071, 'synset': 'screech_owl.n.02', 'name': 'screech_owl'}, {'id': 2072, 'synset': 'screech_owl.n.01', 'name': 'screech_owl'}, {'id': 2073, 'synset': 'scops_owl.n.01', 'name': 'scops_owl'}, {'id': 2074, 'synset': 'spotted_owl.n.01', 'name': 'spotted_owl'}, {'id': 2075, 'synset': 'old_world_scops_owl.n.01', 'name': 'Old_World_scops_owl'}, {'id': 2076, 'synset': 'oriental_scops_owl.n.01', 'name': 'Oriental_scops_owl'}, {'id': 2077, 'synset': 'hoot_owl.n.01', 'name': 'hoot_owl'}, {'id': 2078, 'synset': 'hawk_owl.n.01', 'name': 'hawk_owl'}, {'id': 2079, 'synset': 'long-eared_owl.n.01', 'name': 'long-eared_owl'}, {'id': 2080, 'synset': 'laughing_owl.n.01', 'name': 'laughing_owl'}, {'id': 2081, 'synset': 'barn_owl.n.01', 'name': 'barn_owl'}, {'id': 2082, 'synset': 'amphibian.n.03', 'name': 'amphibian'}, {'id': 2083, 'synset': 'ichyostega.n.01', 'name': 'Ichyostega'}, {'id': 2084, 'synset': 'urodele.n.01', 'name': 'urodele'}, {'id': 2085, 'synset': 'salamander.n.01', 'name': 'salamander'}, {'id': 2086, 'synset': 'european_fire_salamander.n.01', 'name': 'European_fire_salamander'}, {'id': 2087, 'synset': 'spotted_salamander.n.02', 'name': 'spotted_salamander'}, {'id': 2088, 'synset': 'alpine_salamander.n.01', 'name': 'alpine_salamander'}, {'id': 2089, 'synset': 'newt.n.01', 'name': 'newt'}, {'id': 2090, 'synset': 'common_newt.n.01', 'name': 'common_newt'}, {'id': 2091, 'synset': 'red_eft.n.01', 'name': 'red_eft'}, {'id': 2092, 'synset': 'pacific_newt.n.01', 'name': 'Pacific_newt'}, {'id': 2093, 'synset': 'rough-skinned_newt.n.01', 'name': 'rough-skinned_newt'}, {'id': 2094, 'synset': 'california_newt.n.01', 'name': 'California_newt'}, {'id': 2095, 'synset': 'eft.n.01', 'name': 'eft'}, {'id': 2096, 'synset': 'ambystomid.n.01', 'name': 'ambystomid'}, {'id': 2097, 'synset': 'mole_salamander.n.01', 'name': 'mole_salamander'}, {'id': 2098, 'synset': 'spotted_salamander.n.01', 'name': 'spotted_salamander'}, {'id': 2099, 'synset': 'tiger_salamander.n.01', 'name': 'tiger_salamander'}, {'id': 2100, 'synset': 'axolotl.n.01', 'name': 'axolotl'}, {'id': 2101, 'synset': 'waterdog.n.01', 'name': 'waterdog'}, {'id': 2102, 'synset': 'hellbender.n.01', 'name': 'hellbender'}, {'id': 2103, 'synset': 'giant_salamander.n.01', 'name': 'giant_salamander'}, {'id': 2104, 'synset': 'olm.n.01', 'name': 'olm'}, {'id': 2105, 'synset': 'mud_puppy.n.01', 'name': 'mud_puppy'}, {'id': 2106, 'synset': 'dicamptodon.n.01', 'name': 'dicamptodon'}, {'id': 2107, 'synset': 'pacific_giant_salamander.n.01', 'name': 'Pacific_giant_salamander'}, {'id': 2108, 'synset': 'olympic_salamander.n.01', 'name': 'olympic_salamander'}, {'id': 2109, 'synset': 'lungless_salamander.n.01', 'name': 'lungless_salamander'}, {'id': 2110, 'synset': 'eastern_red-backed_salamander.n.01', 'name': 'eastern_red-backed_salamander'}, {'id': 2111, 'synset': 'western_red-backed_salamander.n.01', 'name': 'western_red-backed_salamander'}, {'id': 2112, 'synset': 'dusky_salamander.n.01', 'name': 'dusky_salamander'}, {'id': 2113, 'synset': 'climbing_salamander.n.01', 'name': 'climbing_salamander'}, {'id': 2114, 'synset': 'arboreal_salamander.n.01', 'name': 'arboreal_salamander'}, {'id': 2115, 'synset': 'slender_salamander.n.01', 'name': 'slender_salamander'}, {'id': 2116, 'synset': 'web-toed_salamander.n.01', 'name': 'web-toed_salamander'}, {'id': 2117, 'synset': 'shasta_salamander.n.01', 'name': 'Shasta_salamander'}, {'id': 2118, 'synset': 
'limestone_salamander.n.01', 'name': 'limestone_salamander'}, {'id': 2119, 'synset': 'amphiuma.n.01', 'name': 'amphiuma'}, {'id': 2120, 'synset': 'siren.n.05', 'name': 'siren'}, {'id': 2121, 'synset': 'true_frog.n.01', 'name': 'true_frog'}, {'id': 2122, 'synset': 'wood-frog.n.01', 'name': 'wood-frog'}, {'id': 2123, 'synset': 'leopard_frog.n.01', 'name': 'leopard_frog'}, {'id': 2124, 'synset': 'bullfrog.n.01', 'name': 'bullfrog'}, {'id': 2125, 'synset': 'green_frog.n.01', 'name': 'green_frog'}, {'id': 2126, 'synset': 'cascades_frog.n.01', 'name': 'cascades_frog'}, {'id': 2127, 'synset': 'goliath_frog.n.01', 'name': 'goliath_frog'}, {'id': 2128, 'synset': 'pickerel_frog.n.01', 'name': 'pickerel_frog'}, {'id': 2129, 'synset': 'tarahumara_frog.n.01', 'name': 'tarahumara_frog'}, {'id': 2130, 'synset': 'grass_frog.n.01', 'name': 'grass_frog'}, {'id': 2131, 'synset': 'leptodactylid_frog.n.01', 'name': 'leptodactylid_frog'}, {'id': 2132, 'synset': 'robber_frog.n.02', 'name': 'robber_frog'}, {'id': 2133, 'synset': 'barking_frog.n.01', 'name': 'barking_frog'}, {'id': 2134, 'synset': 'crapaud.n.01', 'name': 'crapaud'}, {'id': 2135, 'synset': 'tree_frog.n.02', 'name': 'tree_frog'}, {'id': 2136, 'synset': 'tailed_frog.n.01', 'name': 'tailed_frog'}, {'id': 2137, 'synset': 'liopelma_hamiltoni.n.01', 'name': 'Liopelma_hamiltoni'}, {'id': 2138, 'synset': 'true_toad.n.01', 'name': 'true_toad'}, {'id': 2139, 'synset': 'bufo.n.01', 'name': 'bufo'}, {'id': 2140, 'synset': 'agua.n.01', 'name': 'agua'}, {'id': 2141, 'synset': 'european_toad.n.01', 'name': 'European_toad'}, {'id': 2142, 'synset': 'natterjack.n.01', 'name': 'natterjack'}, {'id': 2143, 'synset': 'american_toad.n.01', 'name': 'American_toad'}, {'id': 2144, 'synset': 'eurasian_green_toad.n.01', 'name': 'Eurasian_green_toad'}, {'id': 2145, 'synset': 'american_green_toad.n.01', 'name': 'American_green_toad'}, {'id': 2146, 'synset': 'yosemite_toad.n.01', 'name': 'Yosemite_toad'}, {'id': 2147, 'synset': 'texas_toad.n.01', 'name': 'Texas_toad'}, {'id': 2148, 'synset': 'southwestern_toad.n.01', 'name': 'southwestern_toad'}, {'id': 2149, 'synset': 'western_toad.n.01', 'name': 'western_toad'}, {'id': 2150, 'synset': 'obstetrical_toad.n.01', 'name': 'obstetrical_toad'}, {'id': 2151, 'synset': 'midwife_toad.n.01', 'name': 'midwife_toad'}, {'id': 2152, 'synset': 'fire-bellied_toad.n.01', 'name': 'fire-bellied_toad'}, {'id': 2153, 'synset': 'spadefoot.n.01', 'name': 'spadefoot'}, {'id': 2154, 'synset': 'western_spadefoot.n.01', 'name': 'western_spadefoot'}, {'id': 2155, 'synset': 'southern_spadefoot.n.01', 'name': 'southern_spadefoot'}, {'id': 2156, 'synset': 'plains_spadefoot.n.01', 'name': 'plains_spadefoot'}, {'id': 2157, 'synset': 'tree_toad.n.01', 'name': 'tree_toad'}, {'id': 2158, 'synset': 'spring_peeper.n.01', 'name': 'spring_peeper'}, {'id': 2159, 'synset': 'pacific_tree_toad.n.01', 'name': 'Pacific_tree_toad'}, {'id': 2160, 'synset': 'canyon_treefrog.n.01', 'name': 'canyon_treefrog'}, {'id': 2161, 'synset': 'chameleon_tree_frog.n.01', 'name': 'chameleon_tree_frog'}, {'id': 2162, 'synset': 'cricket_frog.n.01', 'name': 'cricket_frog'}, {'id': 2163, 'synset': 'northern_cricket_frog.n.01', 'name': 'northern_cricket_frog'}, {'id': 2164, 'synset': 'eastern_cricket_frog.n.01', 'name': 'eastern_cricket_frog'}, {'id': 2165, 'synset': 'chorus_frog.n.01', 'name': 'chorus_frog'}, {'id': 2166, 'synset': 'lowland_burrowing_treefrog.n.01', 'name': 'lowland_burrowing_treefrog'}, {'id': 2167, 'synset': 'western_narrow-mouthed_toad.n.01', 'name': 
'western_narrow-mouthed_toad'}, {'id': 2168, 'synset': 'eastern_narrow-mouthed_toad.n.01', 'name': 'eastern_narrow-mouthed_toad'}, {'id': 2169, 'synset': 'sheep_frog.n.01', 'name': 'sheep_frog'}, {'id': 2170, 'synset': 'tongueless_frog.n.01', 'name': 'tongueless_frog'}, {'id': 2171, 'synset': 'surinam_toad.n.01', 'name': 'Surinam_toad'}, {'id': 2172, 'synset': 'african_clawed_frog.n.01', 'name': 'African_clawed_frog'}, {'id': 2173, 'synset': 'south_american_poison_toad.n.01', 'name': 'South_American_poison_toad'}, {'id': 2174, 'synset': 'caecilian.n.01', 'name': 'caecilian'}, {'id': 2175, 'synset': 'reptile.n.01', 'name': 'reptile'}, {'id': 2176, 'synset': 'anapsid.n.01', 'name': 'anapsid'}, {'id': 2177, 'synset': 'diapsid.n.01', 'name': 'diapsid'}, {'id': 2178, 'synset': 'diapsida.n.01', 'name': 'Diapsida'}, {'id': 2179, 'synset': 'chelonian.n.01', 'name': 'chelonian'}, {'id': 2180, 'synset': 'sea_turtle.n.01', 'name': 'sea_turtle'}, {'id': 2181, 'synset': 'green_turtle.n.01', 'name': 'green_turtle'}, {'id': 2182, 'synset': 'loggerhead.n.02', 'name': 'loggerhead'}, {'id': 2183, 'synset': 'ridley.n.01', 'name': 'ridley'}, {'id': 2184, 'synset': 'atlantic_ridley.n.01', 'name': 'Atlantic_ridley'}, {'id': 2185, 'synset': 'pacific_ridley.n.01', 'name': 'Pacific_ridley'}, {'id': 2186, 'synset': 'hawksbill_turtle.n.01', 'name': 'hawksbill_turtle'}, {'id': 2187, 'synset': 'leatherback_turtle.n.01', 'name': 'leatherback_turtle'}, {'id': 2188, 'synset': 'snapping_turtle.n.01', 'name': 'snapping_turtle'}, {'id': 2189, 'synset': 'common_snapping_turtle.n.01', 'name': 'common_snapping_turtle'}, {'id': 2190, 'synset': 'alligator_snapping_turtle.n.01', 'name': 'alligator_snapping_turtle'}, {'id': 2191, 'synset': 'mud_turtle.n.01', 'name': 'mud_turtle'}, {'id': 2192, 'synset': 'musk_turtle.n.01', 'name': 'musk_turtle'}, {'id': 2193, 'synset': 'terrapin.n.01', 'name': 'terrapin'}, {'id': 2194, 'synset': 'diamondback_terrapin.n.01', 'name': 'diamondback_terrapin'}, {'id': 2195, 'synset': 'red-bellied_terrapin.n.01', 'name': 'red-bellied_terrapin'}, {'id': 2196, 'synset': 'slider.n.03', 'name': 'slider'}, {'id': 2197, 'synset': 'cooter.n.01', 'name': 'cooter'}, {'id': 2198, 'synset': 'box_turtle.n.01', 'name': 'box_turtle'}, {'id': 2199, 'synset': 'western_box_turtle.n.01', 'name': 'Western_box_turtle'}, {'id': 2200, 'synset': 'painted_turtle.n.01', 'name': 'painted_turtle'}, {'id': 2201, 'synset': 'tortoise.n.01', 'name': 'tortoise'}, {'id': 2202, 'synset': 'european_tortoise.n.01', 'name': 'European_tortoise'}, {'id': 2203, 'synset': 'giant_tortoise.n.01', 'name': 'giant_tortoise'}, {'id': 2204, 'synset': 'gopher_tortoise.n.01', 'name': 'gopher_tortoise'}, {'id': 2205, 'synset': 'desert_tortoise.n.01', 'name': 'desert_tortoise'}, {'id': 2206, 'synset': 'texas_tortoise.n.01', 'name': 'Texas_tortoise'}, {'id': 2207, 'synset': 'soft-shelled_turtle.n.01', 'name': 'soft-shelled_turtle'}, {'id': 2208, 'synset': 'spiny_softshell.n.01', 'name': 'spiny_softshell'}, {'id': 2209, 'synset': 'smooth_softshell.n.01', 'name': 'smooth_softshell'}, {'id': 2210, 'synset': 'tuatara.n.01', 'name': 'tuatara'}, {'id': 2211, 'synset': 'saurian.n.01', 'name': 'saurian'}, {'id': 2212, 'synset': 'gecko.n.01', 'name': 'gecko'}, {'id': 2213, 'synset': 'flying_gecko.n.01', 'name': 'flying_gecko'}, {'id': 2214, 'synset': 'banded_gecko.n.01', 'name': 'banded_gecko'}, {'id': 2215, 'synset': 'iguanid.n.01', 'name': 'iguanid'}, {'id': 2216, 'synset': 'common_iguana.n.01', 'name': 'common_iguana'}, {'id': 2217, 'synset': 
'marine_iguana.n.01', 'name': 'marine_iguana'}, {'id': 2218, 'synset': 'desert_iguana.n.01', 'name': 'desert_iguana'}, {'id': 2219, 'synset': 'chuckwalla.n.01', 'name': 'chuckwalla'}, {'id': 2220, 'synset': 'zebra-tailed_lizard.n.01', 'name': 'zebra-tailed_lizard'}, {'id': 2221, 'synset': 'fringe-toed_lizard.n.01', 'name': 'fringe-toed_lizard'}, {'id': 2222, 'synset': 'earless_lizard.n.01', 'name': 'earless_lizard'}, {'id': 2223, 'synset': 'collared_lizard.n.01', 'name': 'collared_lizard'}, {'id': 2224, 'synset': 'leopard_lizard.n.01', 'name': 'leopard_lizard'}, {'id': 2225, 'synset': 'spiny_lizard.n.02', 'name': 'spiny_lizard'}, {'id': 2226, 'synset': 'fence_lizard.n.01', 'name': 'fence_lizard'}, {'id': 2227, 'synset': 'western_fence_lizard.n.01', 'name': 'western_fence_lizard'}, {'id': 2228, 'synset': 'eastern_fence_lizard.n.01', 'name': 'eastern_fence_lizard'}, {'id': 2229, 'synset': 'sagebrush_lizard.n.01', 'name': 'sagebrush_lizard'}, {'id': 2230, 'synset': 'side-blotched_lizard.n.01', 'name': 'side-blotched_lizard'}, {'id': 2231, 'synset': 'tree_lizard.n.01', 'name': 'tree_lizard'}, {'id': 2232, 'synset': 'horned_lizard.n.01', 'name': 'horned_lizard'}, {'id': 2233, 'synset': 'texas_horned_lizard.n.01', 'name': 'Texas_horned_lizard'}, {'id': 2234, 'synset': 'basilisk.n.03', 'name': 'basilisk'}, {'id': 2235, 'synset': 'american_chameleon.n.01', 'name': 'American_chameleon'}, {'id': 2236, 'synset': 'worm_lizard.n.01', 'name': 'worm_lizard'}, {'id': 2237, 'synset': 'night_lizard.n.01', 'name': 'night_lizard'}, {'id': 2238, 'synset': 'skink.n.01', 'name': 'skink'}, {'id': 2239, 'synset': 'western_skink.n.01', 'name': 'western_skink'}, {'id': 2240, 'synset': 'mountain_skink.n.01', 'name': 'mountain_skink'}, {'id': 2241, 'synset': 'teiid_lizard.n.01', 'name': 'teiid_lizard'}, {'id': 2242, 'synset': 'whiptail.n.01', 'name': 'whiptail'}, {'id': 2243, 'synset': 'racerunner.n.01', 'name': 'racerunner'}, {'id': 2244, 'synset': 'plateau_striped_whiptail.n.01', 'name': 'plateau_striped_whiptail'}, {'id': 2245, 'synset': 'chihuahuan_spotted_whiptail.n.01', 'name': 'Chihuahuan_spotted_whiptail'}, {'id': 2246, 'synset': 'western_whiptail.n.01', 'name': 'western_whiptail'}, {'id': 2247, 'synset': 'checkered_whiptail.n.01', 'name': 'checkered_whiptail'}, {'id': 2248, 'synset': 'teju.n.01', 'name': 'teju'}, {'id': 2249, 'synset': 'caiman_lizard.n.01', 'name': 'caiman_lizard'}, {'id': 2250, 'synset': 'agamid.n.01', 'name': 'agamid'}, {'id': 2251, 'synset': 'agama.n.01', 'name': 'agama'}, {'id': 2252, 'synset': 'frilled_lizard.n.01', 'name': 'frilled_lizard'}, {'id': 2253, 'synset': 'moloch.n.03', 'name': 'moloch'}, {'id': 2254, 'synset': 'mountain_devil.n.02', 'name': 'mountain_devil'}, {'id': 2255, 'synset': 'anguid_lizard.n.01', 'name': 'anguid_lizard'}, {'id': 2256, 'synset': 'alligator_lizard.n.01', 'name': 'alligator_lizard'}, {'id': 2257, 'synset': 'blindworm.n.01', 'name': 'blindworm'}, {'id': 2258, 'synset': 'glass_lizard.n.01', 'name': 'glass_lizard'}, {'id': 2259, 'synset': 'legless_lizard.n.01', 'name': 'legless_lizard'}, {'id': 2260, 'synset': 'lanthanotus_borneensis.n.01', 'name': 'Lanthanotus_borneensis'}, {'id': 2261, 'synset': 'venomous_lizard.n.01', 'name': 'venomous_lizard'}, {'id': 2262, 'synset': 'gila_monster.n.01', 'name': 'Gila_monster'}, {'id': 2263, 'synset': 'beaded_lizard.n.01', 'name': 'beaded_lizard'}, {'id': 2264, 'synset': 'lacertid_lizard.n.01', 'name': 'lacertid_lizard'}, {'id': 2265, 'synset': 'sand_lizard.n.01', 'name': 'sand_lizard'}, {'id': 2266, 'synset': 
'green_lizard.n.01', 'name': 'green_lizard'}, {'id': 2267, 'synset': 'chameleon.n.03', 'name': 'chameleon'}, {'id': 2268, 'synset': 'african_chameleon.n.01', 'name': 'African_chameleon'}, {'id': 2269, 'synset': 'horned_chameleon.n.01', 'name': 'horned_chameleon'}, {'id': 2270, 'synset': 'monitor.n.07', 'name': 'monitor'}, {'id': 2271, 'synset': 'african_monitor.n.01', 'name': 'African_monitor'}, {'id': 2272, 'synset': 'komodo_dragon.n.01', 'name': 'Komodo_dragon'}, {'id': 2273, 'synset': 'crocodilian_reptile.n.01', 'name': 'crocodilian_reptile'}, {'id': 2274, 'synset': 'crocodile.n.01', 'name': 'crocodile'}, {'id': 2275, 'synset': 'african_crocodile.n.01', 'name': 'African_crocodile'}, {'id': 2276, 'synset': 'asian_crocodile.n.01', 'name': 'Asian_crocodile'}, {'id': 2277, 'synset': "morlett's_crocodile.n.01", 'name': "Morlett's_crocodile"}, {'id': 2278, 'synset': 'false_gavial.n.01', 'name': 'false_gavial'}, {'id': 2279, 'synset': 'american_alligator.n.01', 'name': 'American_alligator'}, {'id': 2280, 'synset': 'chinese_alligator.n.01', 'name': 'Chinese_alligator'}, {'id': 2281, 'synset': 'caiman.n.01', 'name': 'caiman'}, {'id': 2282, 'synset': 'spectacled_caiman.n.01', 'name': 'spectacled_caiman'}, {'id': 2283, 'synset': 'gavial.n.01', 'name': 'gavial'}, {'id': 2284, 'synset': 'armored_dinosaur.n.01', 'name': 'armored_dinosaur'}, {'id': 2285, 'synset': 'stegosaur.n.01', 'name': 'stegosaur'}, {'id': 2286, 'synset': 'ankylosaur.n.01', 'name': 'ankylosaur'}, {'id': 2287, 'synset': 'edmontonia.n.01', 'name': 'Edmontonia'}, {'id': 2288, 'synset': 'bone-headed_dinosaur.n.01', 'name': 'bone-headed_dinosaur'}, {'id': 2289, 'synset': 'pachycephalosaur.n.01', 'name': 'pachycephalosaur'}, {'id': 2290, 'synset': 'ceratopsian.n.01', 'name': 'ceratopsian'}, {'id': 2291, 'synset': 'protoceratops.n.01', 'name': 'protoceratops'}, {'id': 2292, 'synset': 'triceratops.n.01', 'name': 'triceratops'}, {'id': 2293, 'synset': 'styracosaur.n.01', 'name': 'styracosaur'}, {'id': 2294, 'synset': 'psittacosaur.n.01', 'name': 'psittacosaur'}, {'id': 2295, 'synset': 'ornithopod.n.01', 'name': 'ornithopod'}, {'id': 2296, 'synset': 'hadrosaur.n.01', 'name': 'hadrosaur'}, {'id': 2297, 'synset': 'trachodon.n.01', 'name': 'trachodon'}, {'id': 2298, 'synset': 'saurischian.n.01', 'name': 'saurischian'}, {'id': 2299, 'synset': 'sauropod.n.01', 'name': 'sauropod'}, {'id': 2300, 'synset': 'apatosaur.n.01', 'name': 'apatosaur'}, {'id': 2301, 'synset': 'barosaur.n.01', 'name': 'barosaur'}, {'id': 2302, 'synset': 'diplodocus.n.01', 'name': 'diplodocus'}, {'id': 2303, 'synset': 'argentinosaur.n.01', 'name': 'argentinosaur'}, {'id': 2304, 'synset': 'theropod.n.01', 'name': 'theropod'}, {'id': 2305, 'synset': 'ceratosaur.n.01', 'name': 'ceratosaur'}, {'id': 2306, 'synset': 'coelophysis.n.01', 'name': 'coelophysis'}, {'id': 2307, 'synset': 'tyrannosaur.n.01', 'name': 'tyrannosaur'}, {'id': 2308, 'synset': 'allosaur.n.01', 'name': 'allosaur'}, {'id': 2309, 'synset': 'ornithomimid.n.01', 'name': 'ornithomimid'}, {'id': 2310, 'synset': 'maniraptor.n.01', 'name': 'maniraptor'}, {'id': 2311, 'synset': 'oviraptorid.n.01', 'name': 'oviraptorid'}, {'id': 2312, 'synset': 'velociraptor.n.01', 'name': 'velociraptor'}, {'id': 2313, 'synset': 'deinonychus.n.01', 'name': 'deinonychus'}, {'id': 2314, 'synset': 'utahraptor.n.01', 'name': 'utahraptor'}, {'id': 2315, 'synset': 'synapsid.n.01', 'name': 'synapsid'}, {'id': 2316, 'synset': 'dicynodont.n.01', 'name': 'dicynodont'}, {'id': 2317, 'synset': 'pelycosaur.n.01', 'name': 'pelycosaur'}, {'id': 2318, 
'synset': 'dimetrodon.n.01', 'name': 'dimetrodon'}, {'id': 2319, 'synset': 'pterosaur.n.01', 'name': 'pterosaur'}, {'id': 2320, 'synset': 'pterodactyl.n.01', 'name': 'pterodactyl'}, {'id': 2321, 'synset': 'ichthyosaur.n.01', 'name': 'ichthyosaur'}, {'id': 2322, 'synset': 'ichthyosaurus.n.01', 'name': 'ichthyosaurus'}, {'id': 2323, 'synset': 'stenopterygius.n.01', 'name': 'stenopterygius'}, {'id': 2324, 'synset': 'plesiosaur.n.01', 'name': 'plesiosaur'}, {'id': 2325, 'synset': 'nothosaur.n.01', 'name': 'nothosaur'}, {'id': 2326, 'synset': 'colubrid_snake.n.01', 'name': 'colubrid_snake'}, {'id': 2327, 'synset': 'hoop_snake.n.01', 'name': 'hoop_snake'}, {'id': 2328, 'synset': 'thunder_snake.n.01', 'name': 'thunder_snake'}, {'id': 2329, 'synset': 'ringneck_snake.n.01', 'name': 'ringneck_snake'}, {'id': 2330, 'synset': 'hognose_snake.n.01', 'name': 'hognose_snake'}, {'id': 2331, 'synset': 'leaf-nosed_snake.n.01', 'name': 'leaf-nosed_snake'}, {'id': 2332, 'synset': 'green_snake.n.02', 'name': 'green_snake'}, {'id': 2333, 'synset': 'smooth_green_snake.n.01', 'name': 'smooth_green_snake'}, {'id': 2334, 'synset': 'rough_green_snake.n.01', 'name': 'rough_green_snake'}, {'id': 2335, 'synset': 'green_snake.n.01', 'name': 'green_snake'}, {'id': 2336, 'synset': 'racer.n.04', 'name': 'racer'}, {'id': 2337, 'synset': 'blacksnake.n.02', 'name': 'blacksnake'}, {'id': 2338, 'synset': 'blue_racer.n.01', 'name': 'blue_racer'}, {'id': 2339, 'synset': 'horseshoe_whipsnake.n.01', 'name': 'horseshoe_whipsnake'}, {'id': 2340, 'synset': 'whip-snake.n.01', 'name': 'whip-snake'}, {'id': 2341, 'synset': 'coachwhip.n.02', 'name': 'coachwhip'}, {'id': 2342, 'synset': 'california_whipsnake.n.01', 'name': 'California_whipsnake'}, {'id': 2343, 'synset': 'sonoran_whipsnake.n.01', 'name': 'Sonoran_whipsnake'}, {'id': 2344, 'synset': 'rat_snake.n.01', 'name': 'rat_snake'}, {'id': 2345, 'synset': 'corn_snake.n.01', 'name': 'corn_snake'}, {'id': 2346, 'synset': 'black_rat_snake.n.01', 'name': 'black_rat_snake'}, {'id': 2347, 'synset': 'chicken_snake.n.01', 'name': 'chicken_snake'}, {'id': 2348, 'synset': 'indian_rat_snake.n.01', 'name': 'Indian_rat_snake'}, {'id': 2349, 'synset': 'glossy_snake.n.01', 'name': 'glossy_snake'}, {'id': 2350, 'synset': 'bull_snake.n.01', 'name': 'bull_snake'}, {'id': 2351, 'synset': 'gopher_snake.n.02', 'name': 'gopher_snake'}, {'id': 2352, 'synset': 'pine_snake.n.01', 'name': 'pine_snake'}, {'id': 2353, 'synset': 'king_snake.n.01', 'name': 'king_snake'}, {'id': 2354, 'synset': 'common_kingsnake.n.01', 'name': 'common_kingsnake'}, {'id': 2355, 'synset': 'milk_snake.n.01', 'name': 'milk_snake'}, {'id': 2356, 'synset': 'garter_snake.n.01', 'name': 'garter_snake'}, {'id': 2357, 'synset': 'common_garter_snake.n.01', 'name': 'common_garter_snake'}, {'id': 2358, 'synset': 'ribbon_snake.n.01', 'name': 'ribbon_snake'}, {'id': 2359, 'synset': 'western_ribbon_snake.n.01', 'name': 'Western_ribbon_snake'}, {'id': 2360, 'synset': 'lined_snake.n.01', 'name': 'lined_snake'}, {'id': 2361, 'synset': 'ground_snake.n.01', 'name': 'ground_snake'}, {'id': 2362, 'synset': 'eastern_ground_snake.n.01', 'name': 'eastern_ground_snake'}, {'id': 2363, 'synset': 'water_snake.n.01', 'name': 'water_snake'}, {'id': 2364, 'synset': 'common_water_snake.n.01', 'name': 'common_water_snake'}, {'id': 2365, 'synset': 'water_moccasin.n.02', 'name': 'water_moccasin'}, {'id': 2366, 'synset': 'grass_snake.n.01', 'name': 'grass_snake'}, {'id': 2367, 'synset': 'viperine_grass_snake.n.01', 'name': 'viperine_grass_snake'}, {'id': 2368, 'synset': 
'red-bellied_snake.n.01', 'name': 'red-bellied_snake'}, {'id': 2369, 'synset': 'sand_snake.n.01', 'name': 'sand_snake'}, {'id': 2370, 'synset': 'banded_sand_snake.n.01', 'name': 'banded_sand_snake'}, {'id': 2371, 'synset': 'black-headed_snake.n.01', 'name': 'black-headed_snake'}, {'id': 2372, 'synset': 'vine_snake.n.01', 'name': 'vine_snake'}, {'id': 2373, 'synset': 'lyre_snake.n.01', 'name': 'lyre_snake'}, {'id': 2374, 'synset': 'sonoran_lyre_snake.n.01', 'name': 'Sonoran_lyre_snake'}, {'id': 2375, 'synset': 'night_snake.n.01', 'name': 'night_snake'}, {'id': 2376, 'synset': 'blind_snake.n.01', 'name': 'blind_snake'}, {'id': 2377, 'synset': 'western_blind_snake.n.01', 'name': 'western_blind_snake'}, {'id': 2378, 'synset': 'indigo_snake.n.01', 'name': 'indigo_snake'}, {'id': 2379, 'synset': 'eastern_indigo_snake.n.01', 'name': 'eastern_indigo_snake'}, {'id': 2380, 'synset': 'constrictor.n.01', 'name': 'constrictor'}, {'id': 2381, 'synset': 'boa.n.02', 'name': 'boa'}, {'id': 2382, 'synset': 'boa_constrictor.n.01', 'name': 'boa_constrictor'}, {'id': 2383, 'synset': 'rubber_boa.n.01', 'name': 'rubber_boa'}, {'id': 2384, 'synset': 'rosy_boa.n.01', 'name': 'rosy_boa'}, {'id': 2385, 'synset': 'anaconda.n.01', 'name': 'anaconda'}, {'id': 2386, 'synset': 'python.n.01', 'name': 'python'}, {'id': 2387, 'synset': 'carpet_snake.n.01', 'name': 'carpet_snake'}, {'id': 2388, 'synset': 'reticulated_python.n.01', 'name': 'reticulated_python'}, {'id': 2389, 'synset': 'indian_python.n.01', 'name': 'Indian_python'}, {'id': 2390, 'synset': 'rock_python.n.01', 'name': 'rock_python'}, {'id': 2391, 'synset': 'amethystine_python.n.01', 'name': 'amethystine_python'}, {'id': 2392, 'synset': 'elapid.n.01', 'name': 'elapid'}, {'id': 2393, 'synset': 'coral_snake.n.02', 'name': 'coral_snake'}, {'id': 2394, 'synset': 'eastern_coral_snake.n.01', 'name': 'eastern_coral_snake'}, {'id': 2395, 'synset': 'western_coral_snake.n.01', 'name': 'western_coral_snake'}, {'id': 2396, 'synset': 'coral_snake.n.01', 'name': 'coral_snake'}, {'id': 2397, 'synset': 'african_coral_snake.n.01', 'name': 'African_coral_snake'}, {'id': 2398, 'synset': 'australian_coral_snake.n.01', 'name': 'Australian_coral_snake'}, {'id': 2399, 'synset': 'copperhead.n.02', 'name': 'copperhead'}, {'id': 2400, 'synset': 'cobra.n.01', 'name': 'cobra'}, {'id': 2401, 'synset': 'indian_cobra.n.01', 'name': 'Indian_cobra'}, {'id': 2402, 'synset': 'asp.n.02', 'name': 'asp'}, {'id': 2403, 'synset': 'black-necked_cobra.n.01', 'name': 'black-necked_cobra'}, {'id': 2404, 'synset': 'hamadryad.n.02', 'name': 'hamadryad'}, {'id': 2405, 'synset': 'ringhals.n.01', 'name': 'ringhals'}, {'id': 2406, 'synset': 'mamba.n.01', 'name': 'mamba'}, {'id': 2407, 'synset': 'black_mamba.n.01', 'name': 'black_mamba'}, {'id': 2408, 'synset': 'green_mamba.n.01', 'name': 'green_mamba'}, {'id': 2409, 'synset': 'death_adder.n.01', 'name': 'death_adder'}, {'id': 2410, 'synset': 'tiger_snake.n.01', 'name': 'tiger_snake'}, {'id': 2411, 'synset': 'australian_blacksnake.n.01', 'name': 'Australian_blacksnake'}, {'id': 2412, 'synset': 'krait.n.01', 'name': 'krait'}, {'id': 2413, 'synset': 'banded_krait.n.01', 'name': 'banded_krait'}, {'id': 2414, 'synset': 'taipan.n.01', 'name': 'taipan'}, {'id': 2415, 'synset': 'sea_snake.n.01', 'name': 'sea_snake'}, {'id': 2416, 'synset': 'viper.n.01', 'name': 'viper'}, {'id': 2417, 'synset': 'adder.n.03', 'name': 'adder'}, {'id': 2418, 'synset': 'asp.n.01', 'name': 'asp'}, {'id': 2419, 'synset': 'puff_adder.n.01', 'name': 'puff_adder'}, {'id': 2420, 'synset': 
'gaboon_viper.n.01', 'name': 'gaboon_viper'}, {'id': 2421, 'synset': 'horned_viper.n.01', 'name': 'horned_viper'}, {'id': 2422, 'synset': 'pit_viper.n.01', 'name': 'pit_viper'}, {'id': 2423, 'synset': 'copperhead.n.01', 'name': 'copperhead'}, {'id': 2424, 'synset': 'water_moccasin.n.01', 'name': 'water_moccasin'}, {'id': 2425, 'synset': 'rattlesnake.n.01', 'name': 'rattlesnake'}, {'id': 2426, 'synset': 'diamondback.n.01', 'name': 'diamondback'}, {'id': 2427, 'synset': 'timber_rattlesnake.n.01', 'name': 'timber_rattlesnake'}, {'id': 2428, 'synset': 'canebrake_rattlesnake.n.01', 'name': 'canebrake_rattlesnake'}, {'id': 2429, 'synset': 'prairie_rattlesnake.n.01', 'name': 'prairie_rattlesnake'}, {'id': 2430, 'synset': 'sidewinder.n.01', 'name': 'sidewinder'}, {'id': 2431, 'synset': 'western_diamondback.n.01', 'name': 'Western_diamondback'}, {'id': 2432, 'synset': 'rock_rattlesnake.n.01', 'name': 'rock_rattlesnake'}, {'id': 2433, 'synset': 'tiger_rattlesnake.n.01', 'name': 'tiger_rattlesnake'}, {'id': 2434, 'synset': 'mojave_rattlesnake.n.01', 'name': 'Mojave_rattlesnake'}, {'id': 2435, 'synset': 'speckled_rattlesnake.n.01', 'name': 'speckled_rattlesnake'}, {'id': 2436, 'synset': 'massasauga.n.02', 'name': 'massasauga'}, {'id': 2437, 'synset': 'ground_rattler.n.01', 'name': 'ground_rattler'}, {'id': 2438, 'synset': 'fer-de-lance.n.01', 'name': 'fer-de-lance'}, {'id': 2439, 'synset': 'carcase.n.01', 'name': 'carcase'}, {'id': 2440, 'synset': 'carrion.n.01', 'name': 'carrion'}, {'id': 2441, 'synset': 'arthropod.n.01', 'name': 'arthropod'}, {'id': 2442, 'synset': 'trilobite.n.01', 'name': 'trilobite'}, {'id': 2443, 'synset': 'arachnid.n.01', 'name': 'arachnid'}, {'id': 2444, 'synset': 'harvestman.n.01', 'name': 'harvestman'}, {'id': 2445, 'synset': 'scorpion.n.03', 'name': 'scorpion'}, {'id': 2446, 'synset': 'false_scorpion.n.01', 'name': 'false_scorpion'}, {'id': 2447, 'synset': 'book_scorpion.n.01', 'name': 'book_scorpion'}, {'id': 2448, 'synset': 'whip-scorpion.n.01', 'name': 'whip-scorpion'}, {'id': 2449, 'synset': 'vinegarroon.n.01', 'name': 'vinegarroon'}, {'id': 2450, 'synset': 'orb-weaving_spider.n.01', 'name': 'orb-weaving_spider'}, {'id': 2451, 'synset': 'black_and_gold_garden_spider.n.01', 'name': 'black_and_gold_garden_spider'}, {'id': 2452, 'synset': 'barn_spider.n.01', 'name': 'barn_spider'}, {'id': 2453, 'synset': 'garden_spider.n.01', 'name': 'garden_spider'}, {'id': 2454, 'synset': 'comb-footed_spider.n.01', 'name': 'comb-footed_spider'}, {'id': 2455, 'synset': 'black_widow.n.01', 'name': 'black_widow'}, {'id': 2456, 'synset': 'tarantula.n.02', 'name': 'tarantula'}, {'id': 2457, 'synset': 'wolf_spider.n.01', 'name': 'wolf_spider'}, {'id': 2458, 'synset': 'european_wolf_spider.n.01', 'name': 'European_wolf_spider'}, {'id': 2459, 'synset': 'trap-door_spider.n.01', 'name': 'trap-door_spider'}, {'id': 2460, 'synset': 'acarine.n.01', 'name': 'acarine'}, {'id': 2461, 'synset': 'tick.n.02', 'name': 'tick'}, {'id': 2462, 'synset': 'hard_tick.n.01', 'name': 'hard_tick'}, {'id': 2463, 'synset': 'ixodes_dammini.n.01', 'name': 'Ixodes_dammini'}, {'id': 2464, 'synset': 'ixodes_neotomae.n.01', 'name': 'Ixodes_neotomae'}, {'id': 2465, 'synset': 'ixodes_pacificus.n.01', 'name': 'Ixodes_pacificus'}, {'id': 2466, 'synset': 'ixodes_scapularis.n.01', 'name': 'Ixodes_scapularis'}, {'id': 2467, 'synset': 'sheep-tick.n.02', 'name': 'sheep-tick'}, {'id': 2468, 'synset': 'ixodes_persulcatus.n.01', 'name': 'Ixodes_persulcatus'}, {'id': 2469, 'synset': 'ixodes_dentatus.n.01', 'name': 'Ixodes_dentatus'}, 
{'id': 2470, 'synset': 'ixodes_spinipalpis.n.01', 'name': 'Ixodes_spinipalpis'}, {'id': 2471, 'synset': 'wood_tick.n.01', 'name': 'wood_tick'}, {'id': 2472, 'synset': 'soft_tick.n.01', 'name': 'soft_tick'}, {'id': 2473, 'synset': 'mite.n.02', 'name': 'mite'}, {'id': 2474, 'synset': 'web-spinning_mite.n.01', 'name': 'web-spinning_mite'}, {'id': 2475, 'synset': 'acarid.n.01', 'name': 'acarid'}, {'id': 2476, 'synset': 'trombidiid.n.01', 'name': 'trombidiid'}, {'id': 2477, 'synset': 'trombiculid.n.01', 'name': 'trombiculid'}, {'id': 2478, 'synset': 'harvest_mite.n.01', 'name': 'harvest_mite'}, {'id': 2479, 'synset': 'acarus.n.01', 'name': 'acarus'}, {'id': 2480, 'synset': 'itch_mite.n.01', 'name': 'itch_mite'}, {'id': 2481, 'synset': 'rust_mite.n.01', 'name': 'rust_mite'}, {'id': 2482, 'synset': 'spider_mite.n.01', 'name': 'spider_mite'}, {'id': 2483, 'synset': 'red_spider.n.01', 'name': 'red_spider'}, {'id': 2484, 'synset': 'myriapod.n.01', 'name': 'myriapod'}, {'id': 2485, 'synset': 'garden_centipede.n.01', 'name': 'garden_centipede'}, {'id': 2486, 'synset': 'tardigrade.n.01', 'name': 'tardigrade'}, {'id': 2487, 'synset': 'centipede.n.01', 'name': 'centipede'}, {'id': 2488, 'synset': 'house_centipede.n.01', 'name': 'house_centipede'}, {'id': 2489, 'synset': 'millipede.n.01', 'name': 'millipede'}, {'id': 2490, 'synset': 'sea_spider.n.01', 'name': 'sea_spider'}, {'id': 2491, 'synset': 'merostomata.n.01', 'name': 'Merostomata'}, {'id': 2492, 'synset': 'horseshoe_crab.n.01', 'name': 'horseshoe_crab'}, {'id': 2493, 'synset': 'asian_horseshoe_crab.n.01', 'name': 'Asian_horseshoe_crab'}, {'id': 2494, 'synset': 'eurypterid.n.01', 'name': 'eurypterid'}, {'id': 2495, 'synset': 'tongue_worm.n.01', 'name': 'tongue_worm'}, {'id': 2496, 'synset': 'gallinaceous_bird.n.01', 'name': 'gallinaceous_bird'}, {'id': 2497, 'synset': 'domestic_fowl.n.01', 'name': 'domestic_fowl'}, {'id': 2498, 'synset': 'dorking.n.01', 'name': 'Dorking'}, {'id': 2499, 'synset': 'plymouth_rock.n.02', 'name': 'Plymouth_Rock'}, {'id': 2500, 'synset': 'cornish.n.02', 'name': 'Cornish'}, {'id': 2501, 'synset': 'rock_cornish.n.01', 'name': 'Rock_Cornish'}, {'id': 2502, 'synset': 'game_fowl.n.01', 'name': 'game_fowl'}, {'id': 2503, 'synset': 'cochin.n.01', 'name': 'cochin'}, {'id': 2504, 'synset': 'jungle_fowl.n.01', 'name': 'jungle_fowl'}, {'id': 2505, 'synset': 'jungle_cock.n.01', 'name': 'jungle_cock'}, {'id': 2506, 'synset': 'jungle_hen.n.01', 'name': 'jungle_hen'}, {'id': 2507, 'synset': 'red_jungle_fowl.n.01', 'name': 'red_jungle_fowl'}, {'id': 2508, 'synset': 'bantam.n.01', 'name': 'bantam'}, {'id': 2509, 'synset': 'chick.n.01', 'name': 'chick'}, {'id': 2510, 'synset': 'cockerel.n.01', 'name': 'cockerel'}, {'id': 2511, 'synset': 'capon.n.02', 'name': 'capon'}, {'id': 2512, 'synset': 'hen.n.01', 'name': 'hen'}, {'id': 2513, 'synset': 'cackler.n.01', 'name': 'cackler'}, {'id': 2514, 'synset': 'brood_hen.n.01', 'name': 'brood_hen'}, {'id': 2515, 'synset': 'mother_hen.n.02', 'name': 'mother_hen'}, {'id': 2516, 'synset': 'layer.n.04', 'name': 'layer'}, {'id': 2517, 'synset': 'pullet.n.02', 'name': 'pullet'}, {'id': 2518, 'synset': 'spring_chicken.n.02', 'name': 'spring_chicken'}, {'id': 2519, 'synset': 'rhode_island_red.n.01', 'name': 'Rhode_Island_red'}, {'id': 2520, 'synset': 'dominique.n.01', 'name': 'Dominique'}, {'id': 2521, 'synset': 'orpington.n.01', 'name': 'Orpington'}, {'id': 2522, 'synset': 'turkey.n.01', 'name': 'turkey'}, {'id': 2523, 'synset': 'turkey_cock.n.01', 'name': 'turkey_cock'}, {'id': 2524, 'synset': 
'ocellated_turkey.n.01', 'name': 'ocellated_turkey'}, {'id': 2525, 'synset': 'grouse.n.02', 'name': 'grouse'}, {'id': 2526, 'synset': 'black_grouse.n.01', 'name': 'black_grouse'}, {'id': 2527, 'synset': 'european_black_grouse.n.01', 'name': 'European_black_grouse'}, {'id': 2528, 'synset': 'asian_black_grouse.n.01', 'name': 'Asian_black_grouse'}, {'id': 2529, 'synset': 'blackcock.n.01', 'name': 'blackcock'}, {'id': 2530, 'synset': 'greyhen.n.01', 'name': 'greyhen'}, {'id': 2531, 'synset': 'ptarmigan.n.01', 'name': 'ptarmigan'}, {'id': 2532, 'synset': 'red_grouse.n.01', 'name': 'red_grouse'}, {'id': 2533, 'synset': 'moorhen.n.02', 'name': 'moorhen'}, {'id': 2534, 'synset': 'capercaillie.n.01', 'name': 'capercaillie'}, {'id': 2535, 'synset': 'spruce_grouse.n.01', 'name': 'spruce_grouse'}, {'id': 2536, 'synset': 'sage_grouse.n.01', 'name': 'sage_grouse'}, {'id': 2537, 'synset': 'ruffed_grouse.n.01', 'name': 'ruffed_grouse'}, {'id': 2538, 'synset': 'sharp-tailed_grouse.n.01', 'name': 'sharp-tailed_grouse'}, {'id': 2539, 'synset': 'prairie_chicken.n.01', 'name': 'prairie_chicken'}, {'id': 2540, 'synset': 'greater_prairie_chicken.n.01', 'name': 'greater_prairie_chicken'}, {'id': 2541, 'synset': 'lesser_prairie_chicken.n.01', 'name': 'lesser_prairie_chicken'}, {'id': 2542, 'synset': 'heath_hen.n.01', 'name': 'heath_hen'}, {'id': 2543, 'synset': 'guan.n.01', 'name': 'guan'}, {'id': 2544, 'synset': 'curassow.n.01', 'name': 'curassow'}, {'id': 2545, 'synset': 'piping_guan.n.01', 'name': 'piping_guan'}, {'id': 2546, 'synset': 'chachalaca.n.01', 'name': 'chachalaca'}, {'id': 2547, 'synset': 'texas_chachalaca.n.01', 'name': 'Texas_chachalaca'}, {'id': 2548, 'synset': 'megapode.n.01', 'name': 'megapode'}, {'id': 2549, 'synset': 'mallee_fowl.n.01', 'name': 'mallee_fowl'}, {'id': 2550, 'synset': 'mallee_hen.n.01', 'name': 'mallee_hen'}, {'id': 2551, 'synset': 'brush_turkey.n.01', 'name': 'brush_turkey'}, {'id': 2552, 'synset': 'maleo.n.01', 'name': 'maleo'}, {'id': 2553, 'synset': 'phasianid.n.01', 'name': 'phasianid'}, {'id': 2554, 'synset': 'pheasant.n.01', 'name': 'pheasant'}, {'id': 2555, 'synset': 'ring-necked_pheasant.n.01', 'name': 'ring-necked_pheasant'}, {'id': 2556, 'synset': 'afropavo.n.01', 'name': 'afropavo'}, {'id': 2557, 'synset': 'argus.n.02', 'name': 'argus'}, {'id': 2558, 'synset': 'golden_pheasant.n.01', 'name': 'golden_pheasant'}, {'id': 2559, 'synset': 'bobwhite.n.01', 'name': 'bobwhite'}, {'id': 2560, 'synset': 'northern_bobwhite.n.01', 'name': 'northern_bobwhite'}, {'id': 2561, 'synset': 'old_world_quail.n.01', 'name': 'Old_World_quail'}, {'id': 2562, 'synset': 'migratory_quail.n.01', 'name': 'migratory_quail'}, {'id': 2563, 'synset': 'monal.n.01', 'name': 'monal'}, {'id': 2564, 'synset': 'peafowl.n.01', 'name': 'peafowl'}, {'id': 2565, 'synset': 'peachick.n.01', 'name': 'peachick'}, {'id': 2566, 'synset': 'peacock.n.02', 'name': 'peacock'}, {'id': 2567, 'synset': 'peahen.n.01', 'name': 'peahen'}, {'id': 2568, 'synset': 'blue_peafowl.n.01', 'name': 'blue_peafowl'}, {'id': 2569, 'synset': 'green_peafowl.n.01', 'name': 'green_peafowl'}, {'id': 2570, 'synset': 'quail.n.02', 'name': 'quail'}, {'id': 2571, 'synset': 'california_quail.n.01', 'name': 'California_quail'}, {'id': 2572, 'synset': 'tragopan.n.01', 'name': 'tragopan'}, {'id': 2573, 'synset': 'partridge.n.03', 'name': 'partridge'}, {'id': 2574, 'synset': 'hungarian_partridge.n.01', 'name': 'Hungarian_partridge'}, {'id': 2575, 'synset': 'red-legged_partridge.n.01', 'name': 'red-legged_partridge'}, {'id': 2576, 'synset': 
'greek_partridge.n.01', 'name': 'Greek_partridge'}, {'id': 2577, 'synset': 'mountain_quail.n.01', 'name': 'mountain_quail'}, {'id': 2578, 'synset': 'guinea_fowl.n.01', 'name': 'guinea_fowl'}, {'id': 2579, 'synset': 'guinea_hen.n.02', 'name': 'guinea_hen'}, {'id': 2580, 'synset': 'hoatzin.n.01', 'name': 'hoatzin'}, {'id': 2581, 'synset': 'tinamou.n.01', 'name': 'tinamou'}, {'id': 2582, 'synset': 'columbiform_bird.n.01', 'name': 'columbiform_bird'}, {'id': 2583, 'synset': 'dodo.n.02', 'name': 'dodo'}, {'id': 2584, 'synset': 'pouter_pigeon.n.01', 'name': 'pouter_pigeon'}, {'id': 2585, 'synset': 'rock_dove.n.01', 'name': 'rock_dove'}, {'id': 2586, 'synset': 'band-tailed_pigeon.n.01', 'name': 'band-tailed_pigeon'}, {'id': 2587, 'synset': 'wood_pigeon.n.01', 'name': 'wood_pigeon'}, {'id': 2588, 'synset': 'turtledove.n.02', 'name': 'turtledove'}, {'id': 2589, 'synset': 'streptopelia_turtur.n.01', 'name': 'Streptopelia_turtur'}, {'id': 2590, 'synset': 'ringdove.n.01', 'name': 'ringdove'}, {'id': 2591, 'synset': 'australian_turtledove.n.01', 'name': 'Australian_turtledove'}, {'id': 2592, 'synset': 'mourning_dove.n.01', 'name': 'mourning_dove'}, {'id': 2593, 'synset': 'domestic_pigeon.n.01', 'name': 'domestic_pigeon'}, {'id': 2594, 'synset': 'squab.n.03', 'name': 'squab'}, {'id': 2595, 'synset': 'fairy_swallow.n.01', 'name': 'fairy_swallow'}, {'id': 2596, 'synset': 'roller.n.07', 'name': 'roller'}, {'id': 2597, 'synset': 'homing_pigeon.n.01', 'name': 'homing_pigeon'}, {'id': 2598, 'synset': 'carrier_pigeon.n.01', 'name': 'carrier_pigeon'}, {'id': 2599, 'synset': 'passenger_pigeon.n.01', 'name': 'passenger_pigeon'}, {'id': 2600, 'synset': 'sandgrouse.n.01', 'name': 'sandgrouse'}, {'id': 2601, 'synset': 'painted_sandgrouse.n.01', 'name': 'painted_sandgrouse'}, {'id': 2602, 'synset': 'pin-tailed_sandgrouse.n.01', 'name': 'pin-tailed_sandgrouse'}, {'id': 2603, 'synset': "pallas's_sandgrouse.n.01", 'name': "pallas's_sandgrouse"}, {'id': 2604, 'synset': 'popinjay.n.02', 'name': 'popinjay'}, {'id': 2605, 'synset': 'poll.n.04', 'name': 'poll'}, {'id': 2606, 'synset': 'african_grey.n.01', 'name': 'African_grey'}, {'id': 2607, 'synset': 'amazon.n.04', 'name': 'amazon'}, {'id': 2608, 'synset': 'macaw.n.01', 'name': 'macaw'}, {'id': 2609, 'synset': 'kea.n.01', 'name': 'kea'}, {'id': 2610, 'synset': 'cockatoo.n.01', 'name': 'cockatoo'}, {'id': 2611, 'synset': 'sulphur-crested_cockatoo.n.01', 'name': 'sulphur-crested_cockatoo'}, {'id': 2612, 'synset': 'pink_cockatoo.n.01', 'name': 'pink_cockatoo'}, {'id': 2613, 'synset': 'cockateel.n.01', 'name': 'cockateel'}, {'id': 2614, 'synset': 'lovebird.n.02', 'name': 'lovebird'}, {'id': 2615, 'synset': 'lory.n.01', 'name': 'lory'}, {'id': 2616, 'synset': 'lorikeet.n.01', 'name': 'lorikeet'}, {'id': 2617, 'synset': 'varied_lorikeet.n.01', 'name': 'varied_Lorikeet'}, {'id': 2618, 'synset': 'rainbow_lorikeet.n.01', 'name': 'rainbow_lorikeet'}, {'id': 2619, 'synset': 'carolina_parakeet.n.01', 'name': 'Carolina_parakeet'}, {'id': 2620, 'synset': 'budgerigar.n.01', 'name': 'budgerigar'}, {'id': 2621, 'synset': 'ring-necked_parakeet.n.01', 'name': 'ring-necked_parakeet'}, {'id': 2622, 'synset': 'cuculiform_bird.n.01', 'name': 'cuculiform_bird'}, {'id': 2623, 'synset': 'cuckoo.n.02', 'name': 'cuckoo'}, {'id': 2624, 'synset': 'european_cuckoo.n.01', 'name': 'European_cuckoo'}, {'id': 2625, 'synset': 'black-billed_cuckoo.n.01', 'name': 'black-billed_cuckoo'}, {'id': 2626, 'synset': 'roadrunner.n.01', 'name': 'roadrunner'}, {'id': 2627, 'synset': 'ani.n.01', 'name': 'ani'}, {'id': 
2628, 'synset': 'coucal.n.01', 'name': 'coucal'}, {'id': 2629, 'synset': 'crow_pheasant.n.01', 'name': 'crow_pheasant'}, {'id': 2630, 'synset': 'touraco.n.01', 'name': 'touraco'}, {'id': 2631, 'synset': 'coraciiform_bird.n.01', 'name': 'coraciiform_bird'}, {'id': 2632, 'synset': 'roller.n.06', 'name': 'roller'}, {'id': 2633, 'synset': 'european_roller.n.01', 'name': 'European_roller'}, {'id': 2634, 'synset': 'ground_roller.n.01', 'name': 'ground_roller'}, {'id': 2635, 'synset': 'kingfisher.n.01', 'name': 'kingfisher'}, {'id': 2636, 'synset': 'eurasian_kingfisher.n.01', 'name': 'Eurasian_kingfisher'}, {'id': 2637, 'synset': 'belted_kingfisher.n.01', 'name': 'belted_kingfisher'}, {'id': 2638, 'synset': 'kookaburra.n.01', 'name': 'kookaburra'}, {'id': 2639, 'synset': 'bee_eater.n.01', 'name': 'bee_eater'}, {'id': 2640, 'synset': 'hornbill.n.01', 'name': 'hornbill'}, {'id': 2641, 'synset': 'hoopoe.n.01', 'name': 'hoopoe'}, {'id': 2642, 'synset': 'euopean_hoopoe.n.01', 'name': 'Euopean_hoopoe'}, {'id': 2643, 'synset': 'wood_hoopoe.n.01', 'name': 'wood_hoopoe'}, {'id': 2644, 'synset': 'motmot.n.01', 'name': 'motmot'}, {'id': 2645, 'synset': 'tody.n.01', 'name': 'tody'}, {'id': 2646, 'synset': 'apodiform_bird.n.01', 'name': 'apodiform_bird'}, {'id': 2647, 'synset': 'swift.n.03', 'name': 'swift'}, {'id': 2648, 'synset': 'european_swift.n.01', 'name': 'European_swift'}, {'id': 2649, 'synset': 'chimney_swift.n.01', 'name': 'chimney_swift'}, {'id': 2650, 'synset': 'swiftlet.n.01', 'name': 'swiftlet'}, {'id': 2651, 'synset': 'tree_swift.n.01', 'name': 'tree_swift'}, {'id': 2652, 'synset': 'archilochus_colubris.n.01', 'name': 'Archilochus_colubris'}, {'id': 2653, 'synset': 'thornbill.n.01', 'name': 'thornbill'}, {'id': 2654, 'synset': 'goatsucker.n.01', 'name': 'goatsucker'}, {'id': 2655, 'synset': 'european_goatsucker.n.01', 'name': 'European_goatsucker'}, {'id': 2656, 'synset': "chuck-will's-widow.n.01", 'name': "chuck-will's-widow"}, {'id': 2657, 'synset': 'whippoorwill.n.01', 'name': 'whippoorwill'}, {'id': 2658, 'synset': 'poorwill.n.01', 'name': 'poorwill'}, {'id': 2659, 'synset': 'frogmouth.n.01', 'name': 'frogmouth'}, {'id': 2660, 'synset': 'oilbird.n.01', 'name': 'oilbird'}, {'id': 2661, 'synset': 'piciform_bird.n.01', 'name': 'piciform_bird'}, {'id': 2662, 'synset': 'woodpecker.n.01', 'name': 'woodpecker'}, {'id': 2663, 'synset': 'green_woodpecker.n.01', 'name': 'green_woodpecker'}, {'id': 2664, 'synset': 'downy_woodpecker.n.01', 'name': 'downy_woodpecker'}, {'id': 2665, 'synset': 'flicker.n.02', 'name': 'flicker'}, {'id': 2666, 'synset': 'yellow-shafted_flicker.n.01', 'name': 'yellow-shafted_flicker'}, {'id': 2667, 'synset': 'gilded_flicker.n.01', 'name': 'gilded_flicker'}, {'id': 2668, 'synset': 'red-shafted_flicker.n.01', 'name': 'red-shafted_flicker'}, {'id': 2669, 'synset': 'ivorybill.n.01', 'name': 'ivorybill'}, {'id': 2670, 'synset': 'redheaded_woodpecker.n.01', 'name': 'redheaded_woodpecker'}, {'id': 2671, 'synset': 'sapsucker.n.01', 'name': 'sapsucker'}, {'id': 2672, 'synset': 'yellow-bellied_sapsucker.n.01', 'name': 'yellow-bellied_sapsucker'}, {'id': 2673, 'synset': 'red-breasted_sapsucker.n.01', 'name': 'red-breasted_sapsucker'}, {'id': 2674, 'synset': 'wryneck.n.02', 'name': 'wryneck'}, {'id': 2675, 'synset': 'piculet.n.01', 'name': 'piculet'}, {'id': 2676, 'synset': 'barbet.n.01', 'name': 'barbet'}, {'id': 2677, 'synset': 'puffbird.n.01', 'name': 'puffbird'}, {'id': 2678, 'synset': 'honey_guide.n.01', 'name': 'honey_guide'}, {'id': 2679, 'synset': 'jacamar.n.01', 'name': 
'jacamar'}, {'id': 2680, 'synset': 'toucan.n.01', 'name': 'toucan'}, {'id': 2681, 'synset': 'toucanet.n.01', 'name': 'toucanet'}, {'id': 2682, 'synset': 'trogon.n.01', 'name': 'trogon'}, {'id': 2683, 'synset': 'quetzal.n.02', 'name': 'quetzal'}, {'id': 2684, 'synset': 'resplendent_quetzel.n.01', 'name': 'resplendent_quetzel'}, {'id': 2685, 'synset': 'aquatic_bird.n.01', 'name': 'aquatic_bird'}, {'id': 2686, 'synset': 'waterfowl.n.01', 'name': 'waterfowl'}, {'id': 2687, 'synset': 'anseriform_bird.n.01', 'name': 'anseriform_bird'}, {'id': 2688, 'synset': 'drake.n.02', 'name': 'drake'}, {'id': 2689, 'synset': 'quack-quack.n.01', 'name': 'quack-quack'}, {'id': 2690, 'synset': 'diving_duck.n.01', 'name': 'diving_duck'}, {'id': 2691, 'synset': 'dabbling_duck.n.01', 'name': 'dabbling_duck'}, {'id': 2692, 'synset': 'black_duck.n.01', 'name': 'black_duck'}, {'id': 2693, 'synset': 'teal.n.02', 'name': 'teal'}, {'id': 2694, 'synset': 'greenwing.n.01', 'name': 'greenwing'}, {'id': 2695, 'synset': 'bluewing.n.01', 'name': 'bluewing'}, {'id': 2696, 'synset': 'garganey.n.01', 'name': 'garganey'}, {'id': 2697, 'synset': 'widgeon.n.01', 'name': 'widgeon'}, {'id': 2698, 'synset': 'american_widgeon.n.01', 'name': 'American_widgeon'}, {'id': 2699, 'synset': 'shoveler.n.02', 'name': 'shoveler'}, {'id': 2700, 'synset': 'pintail.n.01', 'name': 'pintail'}, {'id': 2701, 'synset': 'sheldrake.n.02', 'name': 'sheldrake'}, {'id': 2702, 'synset': 'shelduck.n.01', 'name': 'shelduck'}, {'id': 2703, 'synset': 'ruddy_duck.n.01', 'name': 'ruddy_duck'}, {'id': 2704, 'synset': 'bufflehead.n.01', 'name': 'bufflehead'}, {'id': 2705, 'synset': 'goldeneye.n.02', 'name': 'goldeneye'}, {'id': 2706, 'synset': "barrow's_goldeneye.n.01", 'name': "Barrow's_goldeneye"}, {'id': 2707, 'synset': 'canvasback.n.01', 'name': 'canvasback'}, {'id': 2708, 'synset': 'pochard.n.01', 'name': 'pochard'}, {'id': 2709, 'synset': 'redhead.n.02', 'name': 'redhead'}, {'id': 2710, 'synset': 'scaup.n.01', 'name': 'scaup'}, {'id': 2711, 'synset': 'greater_scaup.n.01', 'name': 'greater_scaup'}, {'id': 2712, 'synset': 'lesser_scaup.n.01', 'name': 'lesser_scaup'}, {'id': 2713, 'synset': 'wild_duck.n.01', 'name': 'wild_duck'}, {'id': 2714, 'synset': 'wood_duck.n.01', 'name': 'wood_duck'}, {'id': 2715, 'synset': 'wood_drake.n.01', 'name': 'wood_drake'}, {'id': 2716, 'synset': 'mandarin_duck.n.01', 'name': 'mandarin_duck'}, {'id': 2717, 'synset': 'muscovy_duck.n.01', 'name': 'muscovy_duck'}, {'id': 2718, 'synset': 'sea_duck.n.01', 'name': 'sea_duck'}, {'id': 2719, 'synset': 'eider.n.01', 'name': 'eider'}, {'id': 2720, 'synset': 'scoter.n.01', 'name': 'scoter'}, {'id': 2721, 'synset': 'common_scoter.n.01', 'name': 'common_scoter'}, {'id': 2722, 'synset': 'old_squaw.n.01', 'name': 'old_squaw'}, {'id': 2723, 'synset': 'merganser.n.01', 'name': 'merganser'}, {'id': 2724, 'synset': 'goosander.n.01', 'name': 'goosander'}, {'id': 2725, 'synset': 'american_merganser.n.01', 'name': 'American_merganser'}, {'id': 2726, 'synset': 'red-breasted_merganser.n.01', 'name': 'red-breasted_merganser'}, {'id': 2727, 'synset': 'smew.n.01', 'name': 'smew'}, {'id': 2728, 'synset': 'hooded_merganser.n.01', 'name': 'hooded_merganser'}, {'id': 2729, 'synset': 'gosling.n.01', 'name': 'gosling'}, {'id': 2730, 'synset': 'gander.n.01', 'name': 'gander'}, {'id': 2731, 'synset': 'chinese_goose.n.01', 'name': 'Chinese_goose'}, {'id': 2732, 'synset': 'greylag.n.01', 'name': 'greylag'}, {'id': 2733, 'synset': 'blue_goose.n.01', 'name': 'blue_goose'}, {'id': 2734, 'synset': 'snow_goose.n.01', 
'name': 'snow_goose'}, {'id': 2735, 'synset': 'brant.n.01', 'name': 'brant'}, {'id': 2736, 'synset': 'common_brant_goose.n.01', 'name': 'common_brant_goose'}, {'id': 2737, 'synset': 'honker.n.03', 'name': 'honker'}, {'id': 2738, 'synset': 'barnacle_goose.n.01', 'name': 'barnacle_goose'}, {'id': 2739, 'synset': 'coscoroba.n.01', 'name': 'coscoroba'}, {'id': 2740, 'synset': 'swan.n.01', 'name': 'swan'}, {'id': 2741, 'synset': 'cob.n.04', 'name': 'cob'}, {'id': 2742, 'synset': 'pen.n.05', 'name': 'pen'}, {'id': 2743, 'synset': 'cygnet.n.01', 'name': 'cygnet'}, {'id': 2744, 'synset': 'mute_swan.n.01', 'name': 'mute_swan'}, {'id': 2745, 'synset': 'whooper.n.02', 'name': 'whooper'}, {'id': 2746, 'synset': 'tundra_swan.n.01', 'name': 'tundra_swan'}, {'id': 2747, 'synset': 'whistling_swan.n.01', 'name': 'whistling_swan'}, {'id': 2748, 'synset': "bewick's_swan.n.01", 'name': "Bewick's_swan"}, {'id': 2749, 'synset': 'trumpeter.n.04', 'name': 'trumpeter'}, {'id': 2750, 'synset': 'black_swan.n.01', 'name': 'black_swan'}, {'id': 2751, 'synset': 'screamer.n.03', 'name': 'screamer'}, {'id': 2752, 'synset': 'horned_screamer.n.01', 'name': 'horned_screamer'}, {'id': 2753, 'synset': 'crested_screamer.n.01', 'name': 'crested_screamer'}, {'id': 2754, 'synset': 'chaja.n.01', 'name': 'chaja'}, {'id': 2755, 'synset': 'mammal.n.01', 'name': 'mammal'}, {'id': 2756, 'synset': 'female_mammal.n.01', 'name': 'female_mammal'}, {'id': 2757, 'synset': 'tusker.n.01', 'name': 'tusker'}, {'id': 2758, 'synset': 'prototherian.n.01', 'name': 'prototherian'}, {'id': 2759, 'synset': 'monotreme.n.01', 'name': 'monotreme'}, {'id': 2760, 'synset': 'echidna.n.02', 'name': 'echidna'}, {'id': 2761, 'synset': 'echidna.n.01', 'name': 'echidna'}, {'id': 2762, 'synset': 'platypus.n.01', 'name': 'platypus'}, {'id': 2763, 'synset': 'marsupial.n.01', 'name': 'marsupial'}, {'id': 2764, 'synset': 'opossum.n.02', 'name': 'opossum'}, {'id': 2765, 'synset': 'common_opossum.n.01', 'name': 'common_opossum'}, {'id': 2766, 'synset': 'crab-eating_opossum.n.01', 'name': 'crab-eating_opossum'}, {'id': 2767, 'synset': 'opossum_rat.n.01', 'name': 'opossum_rat'}, {'id': 2768, 'synset': 'bandicoot.n.01', 'name': 'bandicoot'}, {'id': 2769, 'synset': 'rabbit-eared_bandicoot.n.01', 'name': 'rabbit-eared_bandicoot'}, {'id': 2770, 'synset': 'kangaroo.n.01', 'name': 'kangaroo'}, {'id': 2771, 'synset': 'giant_kangaroo.n.01', 'name': 'giant_kangaroo'}, {'id': 2772, 'synset': 'wallaby.n.01', 'name': 'wallaby'}, {'id': 2773, 'synset': 'common_wallaby.n.01', 'name': 'common_wallaby'}, {'id': 2774, 'synset': 'hare_wallaby.n.01', 'name': 'hare_wallaby'}, {'id': 2775, 'synset': 'nail-tailed_wallaby.n.01', 'name': 'nail-tailed_wallaby'}, {'id': 2776, 'synset': 'rock_wallaby.n.01', 'name': 'rock_wallaby'}, {'id': 2777, 'synset': 'pademelon.n.01', 'name': 'pademelon'}, {'id': 2778, 'synset': 'tree_wallaby.n.01', 'name': 'tree_wallaby'}, {'id': 2779, 'synset': 'musk_kangaroo.n.01', 'name': 'musk_kangaroo'}, {'id': 2780, 'synset': 'rat_kangaroo.n.01', 'name': 'rat_kangaroo'}, {'id': 2781, 'synset': 'potoroo.n.01', 'name': 'potoroo'}, {'id': 2782, 'synset': 'bettong.n.01', 'name': 'bettong'}, {'id': 2783, 'synset': 'jerboa_kangaroo.n.01', 'name': 'jerboa_kangaroo'}, {'id': 2784, 'synset': 'phalanger.n.01', 'name': 'phalanger'}, {'id': 2785, 'synset': 'cuscus.n.01', 'name': 'cuscus'}, {'id': 2786, 'synset': 'brush-tailed_phalanger.n.01', 'name': 'brush-tailed_phalanger'}, {'id': 2787, 'synset': 'flying_phalanger.n.01', 'name': 'flying_phalanger'}, {'id': 2788, 'synset': 
'wombat.n.01', 'name': 'wombat'}, {'id': 2789, 'synset': 'dasyurid_marsupial.n.01', 'name': 'dasyurid_marsupial'}, {'id': 2790, 'synset': 'dasyure.n.01', 'name': 'dasyure'}, {'id': 2791, 'synset': 'eastern_dasyure.n.01', 'name': 'eastern_dasyure'}, {'id': 2792, 'synset': 'native_cat.n.01', 'name': 'native_cat'}, {'id': 2793, 'synset': 'thylacine.n.01', 'name': 'thylacine'}, {'id': 2794, 'synset': 'tasmanian_devil.n.01', 'name': 'Tasmanian_devil'}, {'id': 2795, 'synset': 'pouched_mouse.n.01', 'name': 'pouched_mouse'}, {'id': 2796, 'synset': 'numbat.n.01', 'name': 'numbat'}, {'id': 2797, 'synset': 'pouched_mole.n.01', 'name': 'pouched_mole'}, {'id': 2798, 'synset': 'placental.n.01', 'name': 'placental'}, {'id': 2799, 'synset': 'livestock.n.01', 'name': 'livestock'}, {'id': 2800, 'synset': 'cow.n.02', 'name': 'cow'}, {'id': 2801, 'synset': 'calf.n.04', 'name': 'calf'}, {'id': 2802, 'synset': 'yearling.n.03', 'name': 'yearling'}, {'id': 2803, 'synset': 'buck.n.05', 'name': 'buck'}, {'id': 2804, 'synset': 'doe.n.02', 'name': 'doe'}, {'id': 2805, 'synset': 'insectivore.n.01', 'name': 'insectivore'}, {'id': 2806, 'synset': 'mole.n.06', 'name': 'mole'}, {'id': 2807, 'synset': 'starnose_mole.n.01', 'name': 'starnose_mole'}, {'id': 2808, 'synset': "brewer's_mole.n.01", 'name': "brewer's_mole"}, {'id': 2809, 'synset': 'golden_mole.n.01', 'name': 'golden_mole'}, {'id': 2810, 'synset': 'shrew_mole.n.01', 'name': 'shrew_mole'}, {'id': 2811, 'synset': 'asiatic_shrew_mole.n.01', 'name': 'Asiatic_shrew_mole'}, {'id': 2812, 'synset': 'american_shrew_mole.n.01', 'name': 'American_shrew_mole'}, {'id': 2813, 'synset': 'shrew.n.02', 'name': 'shrew'}, {'id': 2814, 'synset': 'common_shrew.n.01', 'name': 'common_shrew'}, {'id': 2815, 'synset': 'masked_shrew.n.01', 'name': 'masked_shrew'}, {'id': 2816, 'synset': 'short-tailed_shrew.n.01', 'name': 'short-tailed_shrew'}, {'id': 2817, 'synset': 'water_shrew.n.01', 'name': 'water_shrew'}, {'id': 2818, 'synset': 'american_water_shrew.n.01', 'name': 'American_water_shrew'}, {'id': 2819, 'synset': 'european_water_shrew.n.01', 'name': 'European_water_shrew'}, {'id': 2820, 'synset': 'mediterranean_water_shrew.n.01', 'name': 'Mediterranean_water_shrew'}, {'id': 2821, 'synset': 'least_shrew.n.01', 'name': 'least_shrew'}, {'id': 2822, 'synset': 'hedgehog.n.02', 'name': 'hedgehog'}, {'id': 2823, 'synset': 'tenrec.n.01', 'name': 'tenrec'}, {'id': 2824, 'synset': 'tailless_tenrec.n.01', 'name': 'tailless_tenrec'}, {'id': 2825, 'synset': 'otter_shrew.n.01', 'name': 'otter_shrew'}, {'id': 2826, 'synset': 'eiderdown.n.02', 'name': 'eiderdown'}, {'id': 2827, 'synset': 'aftershaft.n.01', 'name': 'aftershaft'}, {'id': 2828, 'synset': 'sickle_feather.n.01', 'name': 'sickle_feather'}, {'id': 2829, 'synset': 'contour_feather.n.01', 'name': 'contour_feather'}, {'id': 2830, 'synset': 'bastard_wing.n.01', 'name': 'bastard_wing'}, {'id': 2831, 'synset': 'saddle_hackle.n.01', 'name': 'saddle_hackle'}, {'id': 2832, 'synset': 'encolure.n.01', 'name': 'encolure'}, {'id': 2833, 'synset': 'hair.n.06', 'name': 'hair'}, {'id': 2834, 'synset': 'squama.n.01', 'name': 'squama'}, {'id': 2835, 'synset': 'scute.n.01', 'name': 'scute'}, {'id': 2836, 'synset': 'sclerite.n.01', 'name': 'sclerite'}, {'id': 2837, 'synset': 'plastron.n.05', 'name': 'plastron'}, {'id': 2838, 'synset': 'scallop_shell.n.01', 'name': 'scallop_shell'}, {'id': 2839, 'synset': 'oyster_shell.n.01', 'name': 'oyster_shell'}, {'id': 2840, 'synset': 'theca.n.02', 'name': 'theca'}, {'id': 2841, 'synset': 'invertebrate.n.01', 'name': 
'invertebrate'}, {'id': 2842, 'synset': 'sponge.n.04', 'name': 'sponge'}, {'id': 2843, 'synset': 'choanocyte.n.01', 'name': 'choanocyte'}, {'id': 2844, 'synset': 'glass_sponge.n.01', 'name': 'glass_sponge'}, {'id': 2845, 'synset': "venus's_flower_basket.n.01", 'name': "Venus's_flower_basket"}, {'id': 2846, 'synset': 'metazoan.n.01', 'name': 'metazoan'}, {'id': 2847, 'synset': 'coelenterate.n.01', 'name': 'coelenterate'}, {'id': 2848, 'synset': 'planula.n.01', 'name': 'planula'}, {'id': 2849, 'synset': 'polyp.n.02', 'name': 'polyp'}, {'id': 2850, 'synset': 'medusa.n.02', 'name': 'medusa'}, {'id': 2851, 'synset': 'jellyfish.n.02', 'name': 'jellyfish'}, {'id': 2852, 'synset': 'scyphozoan.n.01', 'name': 'scyphozoan'}, {'id': 2853, 'synset': 'chrysaora_quinquecirrha.n.01', 'name': 'Chrysaora_quinquecirrha'}, {'id': 2854, 'synset': 'hydrozoan.n.01', 'name': 'hydrozoan'}, {'id': 2855, 'synset': 'hydra.n.04', 'name': 'hydra'}, {'id': 2856, 'synset': 'siphonophore.n.01', 'name': 'siphonophore'}, {'id': 2857, 'synset': 'nanomia.n.01', 'name': 'nanomia'}, {'id': 2858, 'synset': 'portuguese_man-of-war.n.01', 'name': 'Portuguese_man-of-war'}, {'id': 2859, 'synset': 'praya.n.01', 'name': 'praya'}, {'id': 2860, 'synset': 'apolemia.n.01', 'name': 'apolemia'}, {'id': 2861, 'synset': 'anthozoan.n.01', 'name': 'anthozoan'}, {'id': 2862, 'synset': 'sea_anemone.n.01', 'name': 'sea_anemone'}, {'id': 2863, 'synset': 'actinia.n.02', 'name': 'actinia'}, {'id': 2864, 'synset': 'sea_pen.n.01', 'name': 'sea_pen'}, {'id': 2865, 'synset': 'coral.n.04', 'name': 'coral'}, {'id': 2866, 'synset': 'gorgonian.n.01', 'name': 'gorgonian'}, {'id': 2867, 'synset': 'sea_feather.n.01', 'name': 'sea_feather'}, {'id': 2868, 'synset': 'sea_fan.n.01', 'name': 'sea_fan'}, {'id': 2869, 'synset': 'red_coral.n.02', 'name': 'red_coral'}, {'id': 2870, 'synset': 'stony_coral.n.01', 'name': 'stony_coral'}, {'id': 2871, 'synset': 'brain_coral.n.01', 'name': 'brain_coral'}, {'id': 2872, 'synset': 'staghorn_coral.n.01', 'name': 'staghorn_coral'}, {'id': 2873, 'synset': 'mushroom_coral.n.01', 'name': 'mushroom_coral'}, {'id': 2874, 'synset': 'ctenophore.n.01', 'name': 'ctenophore'}, {'id': 2875, 'synset': 'beroe.n.01', 'name': 'beroe'}, {'id': 2876, 'synset': 'platyctenean.n.01', 'name': 'platyctenean'}, {'id': 2877, 'synset': 'sea_gooseberry.n.01', 'name': 'sea_gooseberry'}, {'id': 2878, 'synset': "venus's_girdle.n.01", 'name': "Venus's_girdle"}, {'id': 2879, 'synset': 'worm.n.01', 'name': 'worm'}, {'id': 2880, 'synset': 'helminth.n.01', 'name': 'helminth'}, {'id': 2881, 'synset': 'woodworm.n.01', 'name': 'woodworm'}, {'id': 2882, 'synset': 'woodborer.n.01', 'name': 'woodborer'}, {'id': 2883, 'synset': 'acanthocephalan.n.01', 'name': 'acanthocephalan'}, {'id': 2884, 'synset': 'arrowworm.n.01', 'name': 'arrowworm'}, {'id': 2885, 'synset': 'bladder_worm.n.01', 'name': 'bladder_worm'}, {'id': 2886, 'synset': 'flatworm.n.01', 'name': 'flatworm'}, {'id': 2887, 'synset': 'planarian.n.01', 'name': 'planarian'}, {'id': 2888, 'synset': 'fluke.n.05', 'name': 'fluke'}, {'id': 2889, 'synset': 'cercaria.n.01', 'name': 'cercaria'}, {'id': 2890, 'synset': 'liver_fluke.n.01', 'name': 'liver_fluke'}, {'id': 2891, 'synset': 'fasciolopsis_buski.n.01', 'name': 'Fasciolopsis_buski'}, {'id': 2892, 'synset': 'schistosome.n.01', 'name': 'schistosome'}, {'id': 2893, 'synset': 'tapeworm.n.01', 'name': 'tapeworm'}, {'id': 2894, 'synset': 'echinococcus.n.01', 'name': 'echinococcus'}, {'id': 2895, 'synset': 'taenia.n.02', 'name': 'taenia'}, {'id': 2896, 'synset': 
'ribbon_worm.n.01', 'name': 'ribbon_worm'}, {'id': 2897, 'synset': 'beard_worm.n.01', 'name': 'beard_worm'}, {'id': 2898, 'synset': 'rotifer.n.01', 'name': 'rotifer'}, {'id': 2899, 'synset': 'nematode.n.01', 'name': 'nematode'}, {'id': 2900, 'synset': 'common_roundworm.n.01', 'name': 'common_roundworm'}, {'id': 2901, 'synset': 'chicken_roundworm.n.01', 'name': 'chicken_roundworm'}, {'id': 2902, 'synset': 'pinworm.n.01', 'name': 'pinworm'}, {'id': 2903, 'synset': 'eelworm.n.01', 'name': 'eelworm'}, {'id': 2904, 'synset': 'vinegar_eel.n.01', 'name': 'vinegar_eel'}, {'id': 2905, 'synset': 'trichina.n.01', 'name': 'trichina'}, {'id': 2906, 'synset': 'hookworm.n.01', 'name': 'hookworm'}, {'id': 2907, 'synset': 'filaria.n.02', 'name': 'filaria'}, {'id': 2908, 'synset': 'guinea_worm.n.02', 'name': 'Guinea_worm'}, {'id': 2909, 'synset': 'annelid.n.01', 'name': 'annelid'}, {'id': 2910, 'synset': 'archiannelid.n.01', 'name': 'archiannelid'}, {'id': 2911, 'synset': 'oligochaete.n.01', 'name': 'oligochaete'}, {'id': 2912, 'synset': 'earthworm.n.01', 'name': 'earthworm'}, {'id': 2913, 'synset': 'polychaete.n.01', 'name': 'polychaete'}, {'id': 2914, 'synset': 'lugworm.n.01', 'name': 'lugworm'}, {'id': 2915, 'synset': 'sea_mouse.n.01', 'name': 'sea_mouse'}, {'id': 2916, 'synset': 'bloodworm.n.01', 'name': 'bloodworm'}, {'id': 2917, 'synset': 'leech.n.01', 'name': 'leech'}, {'id': 2918, 'synset': 'medicinal_leech.n.01', 'name': 'medicinal_leech'}, {'id': 2919, 'synset': 'horseleech.n.01', 'name': 'horseleech'}, {'id': 2920, 'synset': 'mollusk.n.01', 'name': 'mollusk'}, {'id': 2921, 'synset': 'scaphopod.n.01', 'name': 'scaphopod'}, {'id': 2922, 'synset': 'tooth_shell.n.01', 'name': 'tooth_shell'}, {'id': 2923, 'synset': 'gastropod.n.01', 'name': 'gastropod'}, {'id': 2924, 'synset': 'abalone.n.01', 'name': 'abalone'}, {'id': 2925, 'synset': 'ormer.n.01', 'name': 'ormer'}, {'id': 2926, 'synset': 'scorpion_shell.n.01', 'name': 'scorpion_shell'}, {'id': 2927, 'synset': 'conch.n.01', 'name': 'conch'}, {'id': 2928, 'synset': 'giant_conch.n.01', 'name': 'giant_conch'}, {'id': 2929, 'synset': 'snail.n.01', 'name': 'snail'}, {'id': 2930, 'synset': 'edible_snail.n.01', 'name': 'edible_snail'}, {'id': 2931, 'synset': 'garden_snail.n.01', 'name': 'garden_snail'}, {'id': 2932, 'synset': 'brown_snail.n.01', 'name': 'brown_snail'}, {'id': 2933, 'synset': 'helix_hortensis.n.01', 'name': 'Helix_hortensis'}, {'id': 2934, 'synset': 'slug.n.07', 'name': 'slug'}, {'id': 2935, 'synset': 'seasnail.n.02', 'name': 'seasnail'}, {'id': 2936, 'synset': 'neritid.n.01', 'name': 'neritid'}, {'id': 2937, 'synset': 'nerita.n.01', 'name': 'nerita'}, {'id': 2938, 'synset': 'bleeding_tooth.n.01', 'name': 'bleeding_tooth'}, {'id': 2939, 'synset': 'neritina.n.01', 'name': 'neritina'}, {'id': 2940, 'synset': 'whelk.n.02', 'name': 'whelk'}, {'id': 2941, 'synset': 'moon_shell.n.01', 'name': 'moon_shell'}, {'id': 2942, 'synset': 'periwinkle.n.04', 'name': 'periwinkle'}, {'id': 2943, 'synset': 'limpet.n.02', 'name': 'limpet'}, {'id': 2944, 'synset': 'common_limpet.n.01', 'name': 'common_limpet'}, {'id': 2945, 'synset': 'keyhole_limpet.n.01', 'name': 'keyhole_limpet'}, {'id': 2946, 'synset': 'river_limpet.n.01', 'name': 'river_limpet'}, {'id': 2947, 'synset': 'sea_slug.n.01', 'name': 'sea_slug'}, {'id': 2948, 'synset': 'sea_hare.n.01', 'name': 'sea_hare'}, {'id': 2949, 'synset': 'hermissenda_crassicornis.n.01', 'name': 'Hermissenda_crassicornis'}, {'id': 2950, 'synset': 'bubble_shell.n.01', 'name': 'bubble_shell'}, {'id': 2951, 'synset': 
'physa.n.01', 'name': 'physa'}, {'id': 2952, 'synset': 'cowrie.n.01', 'name': 'cowrie'}, {'id': 2953, 'synset': 'money_cowrie.n.01', 'name': 'money_cowrie'}, {'id': 2954, 'synset': 'tiger_cowrie.n.01', 'name': 'tiger_cowrie'}, {'id': 2955, 'synset': 'solenogaster.n.01', 'name': 'solenogaster'}, {'id': 2956, 'synset': 'chiton.n.02', 'name': 'chiton'}, {'id': 2957, 'synset': 'bivalve.n.01', 'name': 'bivalve'}, {'id': 2958, 'synset': 'spat.n.03', 'name': 'spat'}, {'id': 2959, 'synset': 'clam.n.01', 'name': 'clam'}, {'id': 2960, 'synset': 'soft-shell_clam.n.02', 'name': 'soft-shell_clam'}, {'id': 2961, 'synset': 'quahog.n.02', 'name': 'quahog'}, {'id': 2962, 'synset': 'littleneck.n.02', 'name': 'littleneck'}, {'id': 2963, 'synset': 'cherrystone.n.02', 'name': 'cherrystone'}, {'id': 2964, 'synset': 'geoduck.n.01', 'name': 'geoduck'}, {'id': 2965, 'synset': 'razor_clam.n.01', 'name': 'razor_clam'}, {'id': 2966, 'synset': 'giant_clam.n.01', 'name': 'giant_clam'}, {'id': 2967, 'synset': 'cockle.n.02', 'name': 'cockle'}, {'id': 2968, 'synset': 'edible_cockle.n.01', 'name': 'edible_cockle'}, {'id': 2969, 'synset': 'oyster.n.01', 'name': 'oyster'}, {'id': 2970, 'synset': 'japanese_oyster.n.01', 'name': 'Japanese_oyster'}, {'id': 2971, 'synset': 'virginia_oyster.n.01', 'name': 'Virginia_oyster'}, {'id': 2972, 'synset': 'pearl_oyster.n.01', 'name': 'pearl_oyster'}, {'id': 2973, 'synset': 'saddle_oyster.n.01', 'name': 'saddle_oyster'}, {'id': 2974, 'synset': 'window_oyster.n.01', 'name': 'window_oyster'}, {'id': 2975, 'synset': 'ark_shell.n.01', 'name': 'ark_shell'}, {'id': 2976, 'synset': 'blood_clam.n.01', 'name': 'blood_clam'}, {'id': 2977, 'synset': 'mussel.n.02', 'name': 'mussel'}, {'id': 2978, 'synset': 'marine_mussel.n.01', 'name': 'marine_mussel'}, {'id': 2979, 'synset': 'edible_mussel.n.01', 'name': 'edible_mussel'}, {'id': 2980, 'synset': 'freshwater_mussel.n.01', 'name': 'freshwater_mussel'}, {'id': 2981, 'synset': 'pearly-shelled_mussel.n.01', 'name': 'pearly-shelled_mussel'}, {'id': 2982, 'synset': 'thin-shelled_mussel.n.01', 'name': 'thin-shelled_mussel'}, {'id': 2983, 'synset': 'zebra_mussel.n.01', 'name': 'zebra_mussel'}, {'id': 2984, 'synset': 'scallop.n.04', 'name': 'scallop'}, {'id': 2985, 'synset': 'bay_scallop.n.02', 'name': 'bay_scallop'}, {'id': 2986, 'synset': 'sea_scallop.n.02', 'name': 'sea_scallop'}, {'id': 2987, 'synset': 'shipworm.n.01', 'name': 'shipworm'}, {'id': 2988, 'synset': 'teredo.n.01', 'name': 'teredo'}, {'id': 2989, 'synset': 'piddock.n.01', 'name': 'piddock'}, {'id': 2990, 'synset': 'cephalopod.n.01', 'name': 'cephalopod'}, {'id': 2991, 'synset': 'chambered_nautilus.n.01', 'name': 'chambered_nautilus'}, {'id': 2992, 'synset': 'octopod.n.01', 'name': 'octopod'}, {'id': 2993, 'synset': 'paper_nautilus.n.01', 'name': 'paper_nautilus'}, {'id': 2994, 'synset': 'decapod.n.02', 'name': 'decapod'}, {'id': 2995, 'synset': 'squid.n.02', 'name': 'squid'}, {'id': 2996, 'synset': 'loligo.n.01', 'name': 'loligo'}, {'id': 2997, 'synset': 'ommastrephes.n.01', 'name': 'ommastrephes'}, {'id': 2998, 'synset': 'architeuthis.n.01', 'name': 'architeuthis'}, {'id': 2999, 'synset': 'cuttlefish.n.01', 'name': 'cuttlefish'}, {'id': 3000, 'synset': 'spirula.n.01', 'name': 'spirula'}, {'id': 3001, 'synset': 'crustacean.n.01', 'name': 'crustacean'}, {'id': 3002, 'synset': 'malacostracan_crustacean.n.01', 'name': 'malacostracan_crustacean'}, {'id': 3003, 'synset': 'decapod_crustacean.n.01', 'name': 'decapod_crustacean'}, {'id': 3004, 'synset': 'brachyuran.n.01', 'name': 'brachyuran'}, {'id': 
3005, 'synset': 'stone_crab.n.02', 'name': 'stone_crab'}, {'id': 3006, 'synset': 'hard-shell_crab.n.01', 'name': 'hard-shell_crab'}, {'id': 3007, 'synset': 'soft-shell_crab.n.02', 'name': 'soft-shell_crab'}, {'id': 3008, 'synset': 'dungeness_crab.n.02', 'name': 'Dungeness_crab'}, {'id': 3009, 'synset': 'rock_crab.n.01', 'name': 'rock_crab'}, {'id': 3010, 'synset': 'jonah_crab.n.01', 'name': 'Jonah_crab'}, {'id': 3011, 'synset': 'swimming_crab.n.01', 'name': 'swimming_crab'}, {'id': 3012, 'synset': 'english_lady_crab.n.01', 'name': 'English_lady_crab'}, {'id': 3013, 'synset': 'american_lady_crab.n.01', 'name': 'American_lady_crab'}, {'id': 3014, 'synset': 'blue_crab.n.02', 'name': 'blue_crab'}, {'id': 3015, 'synset': 'fiddler_crab.n.01', 'name': 'fiddler_crab'}, {'id': 3016, 'synset': 'pea_crab.n.01', 'name': 'pea_crab'}, {'id': 3017, 'synset': 'king_crab.n.03', 'name': 'king_crab'}, {'id': 3018, 'synset': 'spider_crab.n.01', 'name': 'spider_crab'}, {'id': 3019, 'synset': 'european_spider_crab.n.01', 'name': 'European_spider_crab'}, {'id': 3020, 'synset': 'giant_crab.n.01', 'name': 'giant_crab'}, {'id': 3021, 'synset': 'lobster.n.02', 'name': 'lobster'}, {'id': 3022, 'synset': 'true_lobster.n.01', 'name': 'true_lobster'}, {'id': 3023, 'synset': 'american_lobster.n.02', 'name': 'American_lobster'}, {'id': 3024, 'synset': 'european_lobster.n.02', 'name': 'European_lobster'}, {'id': 3025, 'synset': 'cape_lobster.n.01', 'name': 'Cape_lobster'}, {'id': 3026, 'synset': 'norway_lobster.n.01', 'name': 'Norway_lobster'}, {'id': 3027, 'synset': 'crayfish.n.03', 'name': 'crayfish'}, {'id': 3028, 'synset': 'old_world_crayfish.n.01', 'name': 'Old_World_crayfish'}, {'id': 3029, 'synset': 'american_crayfish.n.01', 'name': 'American_crayfish'}, {'id': 3030, 'synset': 'hermit_crab.n.01', 'name': 'hermit_crab'}, {'id': 3031, 'synset': 'shrimp.n.03', 'name': 'shrimp'}, {'id': 3032, 'synset': 'snapping_shrimp.n.01', 'name': 'snapping_shrimp'}, {'id': 3033, 'synset': 'prawn.n.02', 'name': 'prawn'}, {'id': 3034, 'synset': 'long-clawed_prawn.n.01', 'name': 'long-clawed_prawn'}, {'id': 3035, 'synset': 'tropical_prawn.n.01', 'name': 'tropical_prawn'}, {'id': 3036, 'synset': 'krill.n.01', 'name': 'krill'}, {'id': 3037, 'synset': 'euphausia_pacifica.n.01', 'name': 'Euphausia_pacifica'}, {'id': 3038, 'synset': 'opossum_shrimp.n.01', 'name': 'opossum_shrimp'}, {'id': 3039, 'synset': 'stomatopod.n.01', 'name': 'stomatopod'}, {'id': 3040, 'synset': 'mantis_shrimp.n.01', 'name': 'mantis_shrimp'}, {'id': 3041, 'synset': 'squilla.n.01', 'name': 'squilla'}, {'id': 3042, 'synset': 'isopod.n.01', 'name': 'isopod'}, {'id': 3043, 'synset': 'woodlouse.n.01', 'name': 'woodlouse'}, {'id': 3044, 'synset': 'pill_bug.n.01', 'name': 'pill_bug'}, {'id': 3045, 'synset': 'sow_bug.n.01', 'name': 'sow_bug'}, {'id': 3046, 'synset': 'sea_louse.n.01', 'name': 'sea_louse'}, {'id': 3047, 'synset': 'amphipod.n.01', 'name': 'amphipod'}, {'id': 3048, 'synset': 'skeleton_shrimp.n.01', 'name': 'skeleton_shrimp'}, {'id': 3049, 'synset': 'whale_louse.n.01', 'name': 'whale_louse'}, {'id': 3050, 'synset': 'daphnia.n.01', 'name': 'daphnia'}, {'id': 3051, 'synset': 'fairy_shrimp.n.01', 'name': 'fairy_shrimp'}, {'id': 3052, 'synset': 'brine_shrimp.n.01', 'name': 'brine_shrimp'}, {'id': 3053, 'synset': 'tadpole_shrimp.n.01', 'name': 'tadpole_shrimp'}, {'id': 3054, 'synset': 'copepod.n.01', 'name': 'copepod'}, {'id': 3055, 'synset': 'cyclops.n.02', 'name': 'cyclops'}, {'id': 3056, 'synset': 'seed_shrimp.n.01', 'name': 'seed_shrimp'}, {'id': 3057, 'synset': 
'barnacle.n.01', 'name': 'barnacle'}, {'id': 3058, 'synset': 'acorn_barnacle.n.01', 'name': 'acorn_barnacle'}, {'id': 3059, 'synset': 'goose_barnacle.n.01', 'name': 'goose_barnacle'}, {'id': 3060, 'synset': 'onychophoran.n.01', 'name': 'onychophoran'}, {'id': 3061, 'synset': 'wading_bird.n.01', 'name': 'wading_bird'}, {'id': 3062, 'synset': 'stork.n.01', 'name': 'stork'}, {'id': 3063, 'synset': 'white_stork.n.01', 'name': 'white_stork'}, {'id': 3064, 'synset': 'black_stork.n.01', 'name': 'black_stork'}, {'id': 3065, 'synset': 'adjutant_bird.n.01', 'name': 'adjutant_bird'}, {'id': 3066, 'synset': 'marabou.n.01', 'name': 'marabou'}, {'id': 3067, 'synset': 'openbill.n.01', 'name': 'openbill'}, {'id': 3068, 'synset': 'jabiru.n.03', 'name': 'jabiru'}, {'id': 3069, 'synset': 'saddlebill.n.01', 'name': 'saddlebill'}, {'id': 3070, 'synset': 'policeman_bird.n.01', 'name': 'policeman_bird'}, {'id': 3071, 'synset': 'wood_ibis.n.02', 'name': 'wood_ibis'}, {'id': 3072, 'synset': 'shoebill.n.01', 'name': 'shoebill'}, {'id': 3073, 'synset': 'ibis.n.01', 'name': 'ibis'}, {'id': 3074, 'synset': 'wood_ibis.n.01', 'name': 'wood_ibis'}, {'id': 3075, 'synset': 'sacred_ibis.n.01', 'name': 'sacred_ibis'}, {'id': 3076, 'synset': 'spoonbill.n.01', 'name': 'spoonbill'}, {'id': 3077, 'synset': 'common_spoonbill.n.01', 'name': 'common_spoonbill'}, {'id': 3078, 'synset': 'roseate_spoonbill.n.01', 'name': 'roseate_spoonbill'}, {'id': 3079, 'synset': 'great_blue_heron.n.01', 'name': 'great_blue_heron'}, {'id': 3080, 'synset': 'great_white_heron.n.03', 'name': 'great_white_heron'}, {'id': 3081, 'synset': 'egret.n.01', 'name': 'egret'}, {'id': 3082, 'synset': 'little_blue_heron.n.01', 'name': 'little_blue_heron'}, {'id': 3083, 'synset': 'snowy_egret.n.01', 'name': 'snowy_egret'}, {'id': 3084, 'synset': 'little_egret.n.01', 'name': 'little_egret'}, {'id': 3085, 'synset': 'great_white_heron.n.02', 'name': 'great_white_heron'}, {'id': 3086, 'synset': 'american_egret.n.01', 'name': 'American_egret'}, {'id': 3087, 'synset': 'cattle_egret.n.01', 'name': 'cattle_egret'}, {'id': 3088, 'synset': 'night_heron.n.01', 'name': 'night_heron'}, {'id': 3089, 'synset': 'black-crowned_night_heron.n.01', 'name': 'black-crowned_night_heron'}, {'id': 3090, 'synset': 'yellow-crowned_night_heron.n.01', 'name': 'yellow-crowned_night_heron'}, {'id': 3091, 'synset': 'boatbill.n.01', 'name': 'boatbill'}, {'id': 3092, 'synset': 'bittern.n.01', 'name': 'bittern'}, {'id': 3093, 'synset': 'american_bittern.n.01', 'name': 'American_bittern'}, {'id': 3094, 'synset': 'european_bittern.n.01', 'name': 'European_bittern'}, {'id': 3095, 'synset': 'least_bittern.n.01', 'name': 'least_bittern'}, {'id': 3096, 'synset': 'crane.n.05', 'name': 'crane'}, {'id': 3097, 'synset': 'whooping_crane.n.01', 'name': 'whooping_crane'}, {'id': 3098, 'synset': 'courlan.n.01', 'name': 'courlan'}, {'id': 3099, 'synset': 'limpkin.n.01', 'name': 'limpkin'}, {'id': 3100, 'synset': 'crested_cariama.n.01', 'name': 'crested_cariama'}, {'id': 3101, 'synset': 'chunga.n.01', 'name': 'chunga'}, {'id': 3102, 'synset': 'rail.n.05', 'name': 'rail'}, {'id': 3103, 'synset': 'weka.n.01', 'name': 'weka'}, {'id': 3104, 'synset': 'crake.n.01', 'name': 'crake'}, {'id': 3105, 'synset': 'corncrake.n.01', 'name': 'corncrake'}, {'id': 3106, 'synset': 'spotted_crake.n.01', 'name': 'spotted_crake'}, {'id': 3107, 'synset': 'gallinule.n.01', 'name': 'gallinule'}, {'id': 3108, 'synset': 'florida_gallinule.n.01', 'name': 'Florida_gallinule'}, {'id': 3109, 'synset': 'moorhen.n.01', 'name': 'moorhen'}, {'id': 
3110, 'synset': 'purple_gallinule.n.01', 'name': 'purple_gallinule'}, {'id': 3111, 'synset': 'european_gallinule.n.01', 'name': 'European_gallinule'}, {'id': 3112, 'synset': 'american_gallinule.n.01', 'name': 'American_gallinule'}, {'id': 3113, 'synset': 'notornis.n.01', 'name': 'notornis'}, {'id': 3114, 'synset': 'coot.n.01', 'name': 'coot'}, {'id': 3115, 'synset': 'american_coot.n.01', 'name': 'American_coot'}, {'id': 3116, 'synset': 'old_world_coot.n.01', 'name': 'Old_World_coot'}, {'id': 3117, 'synset': 'bustard.n.01', 'name': 'bustard'}, {'id': 3118, 'synset': 'great_bustard.n.01', 'name': 'great_bustard'}, {'id': 3119, 'synset': 'plain_turkey.n.01', 'name': 'plain_turkey'}, {'id': 3120, 'synset': 'button_quail.n.01', 'name': 'button_quail'}, {'id': 3121, 'synset': 'striped_button_quail.n.01', 'name': 'striped_button_quail'}, {'id': 3122, 'synset': 'plain_wanderer.n.01', 'name': 'plain_wanderer'}, {'id': 3123, 'synset': 'trumpeter.n.03', 'name': 'trumpeter'}, {'id': 3124, 'synset': 'brazilian_trumpeter.n.01', 'name': 'Brazilian_trumpeter'}, {'id': 3125, 'synset': 'shorebird.n.01', 'name': 'shorebird'}, {'id': 3126, 'synset': 'plover.n.01', 'name': 'plover'}, {'id': 3127, 'synset': 'piping_plover.n.01', 'name': 'piping_plover'}, {'id': 3128, 'synset': 'killdeer.n.01', 'name': 'killdeer'}, {'id': 3129, 'synset': 'dotterel.n.01', 'name': 'dotterel'}, {'id': 3130, 'synset': 'golden_plover.n.01', 'name': 'golden_plover'}, {'id': 3131, 'synset': 'lapwing.n.01', 'name': 'lapwing'}, {'id': 3132, 'synset': 'turnstone.n.01', 'name': 'turnstone'}, {'id': 3133, 'synset': 'ruddy_turnstone.n.01', 'name': 'ruddy_turnstone'}, {'id': 3134, 'synset': 'black_turnstone.n.01', 'name': 'black_turnstone'}, {'id': 3135, 'synset': 'sandpiper.n.01', 'name': 'sandpiper'}, {'id': 3136, 'synset': 'surfbird.n.01', 'name': 'surfbird'}, {'id': 3137, 'synset': 'european_sandpiper.n.01', 'name': 'European_sandpiper'}, {'id': 3138, 'synset': 'spotted_sandpiper.n.01', 'name': 'spotted_sandpiper'}, {'id': 3139, 'synset': 'least_sandpiper.n.01', 'name': 'least_sandpiper'}, {'id': 3140, 'synset': 'red-backed_sandpiper.n.01', 'name': 'red-backed_sandpiper'}, {'id': 3141, 'synset': 'greenshank.n.01', 'name': 'greenshank'}, {'id': 3142, 'synset': 'redshank.n.01', 'name': 'redshank'}, {'id': 3143, 'synset': 'yellowlegs.n.01', 'name': 'yellowlegs'}, {'id': 3144, 'synset': 'greater_yellowlegs.n.01', 'name': 'greater_yellowlegs'}, {'id': 3145, 'synset': 'lesser_yellowlegs.n.01', 'name': 'lesser_yellowlegs'}, {'id': 3146, 'synset': 'pectoral_sandpiper.n.01', 'name': 'pectoral_sandpiper'}, {'id': 3147, 'synset': 'knot.n.07', 'name': 'knot'}, {'id': 3148, 'synset': 'curlew_sandpiper.n.01', 'name': 'curlew_sandpiper'}, {'id': 3149, 'synset': 'sanderling.n.01', 'name': 'sanderling'}, {'id': 3150, 'synset': 'upland_sandpiper.n.01', 'name': 'upland_sandpiper'}, {'id': 3151, 'synset': 'ruff.n.03', 'name': 'ruff'}, {'id': 3152, 'synset': 'reeve.n.01', 'name': 'reeve'}, {'id': 3153, 'synset': 'tattler.n.02', 'name': 'tattler'}, {'id': 3154, 'synset': 'polynesian_tattler.n.01', 'name': 'Polynesian_tattler'}, {'id': 3155, 'synset': 'willet.n.01', 'name': 'willet'}, {'id': 3156, 'synset': 'woodcock.n.01', 'name': 'woodcock'}, {'id': 3157, 'synset': 'eurasian_woodcock.n.01', 'name': 'Eurasian_woodcock'}, {'id': 3158, 'synset': 'american_woodcock.n.01', 'name': 'American_woodcock'}, {'id': 3159, 'synset': 'snipe.n.01', 'name': 'snipe'}, {'id': 3160, 'synset': 'whole_snipe.n.01', 'name': 'whole_snipe'}, {'id': 3161, 'synset': 
"wilson's_snipe.n.01", 'name': "Wilson's_snipe"}, {'id': 3162, 'synset': 'great_snipe.n.01', 'name': 'great_snipe'}, {'id': 3163, 'synset': 'jacksnipe.n.01', 'name': 'jacksnipe'}, {'id': 3164, 'synset': 'dowitcher.n.01', 'name': 'dowitcher'}, {'id': 3165, 'synset': 'greyback.n.02', 'name': 'greyback'}, {'id': 3166, 'synset': 'red-breasted_snipe.n.01', 'name': 'red-breasted_snipe'}, {'id': 3167, 'synset': 'curlew.n.01', 'name': 'curlew'}, {'id': 3168, 'synset': 'european_curlew.n.01', 'name': 'European_curlew'}, {'id': 3169, 'synset': 'eskimo_curlew.n.01', 'name': 'Eskimo_curlew'}, {'id': 3170, 'synset': 'godwit.n.01', 'name': 'godwit'}, {'id': 3171, 'synset': 'hudsonian_godwit.n.01', 'name': 'Hudsonian_godwit'}, {'id': 3172, 'synset': 'stilt.n.04', 'name': 'stilt'}, {'id': 3173, 'synset': 'black-necked_stilt.n.01', 'name': 'black-necked_stilt'}, {'id': 3174, 'synset': 'black-winged_stilt.n.01', 'name': 'black-winged_stilt'}, {'id': 3175, 'synset': 'white-headed_stilt.n.01', 'name': 'white-headed_stilt'}, {'id': 3176, 'synset': 'kaki.n.02', 'name': 'kaki'}, {'id': 3177, 'synset': 'stilt.n.03', 'name': 'stilt'}, {'id': 3178, 'synset': 'banded_stilt.n.01', 'name': 'banded_stilt'}, {'id': 3179, 'synset': 'avocet.n.01', 'name': 'avocet'}, {'id': 3180, 'synset': 'oystercatcher.n.01', 'name': 'oystercatcher'}, {'id': 3181, 'synset': 'phalarope.n.01', 'name': 'phalarope'}, {'id': 3182, 'synset': 'red_phalarope.n.01', 'name': 'red_phalarope'}, {'id': 3183, 'synset': 'northern_phalarope.n.01', 'name': 'northern_phalarope'}, {'id': 3184, 'synset': "wilson's_phalarope.n.01", 'name': "Wilson's_phalarope"}, {'id': 3185, 'synset': 'pratincole.n.01', 'name': 'pratincole'}, {'id': 3186, 'synset': 'courser.n.04', 'name': 'courser'}, {'id': 3187, 'synset': 'cream-colored_courser.n.01', 'name': 'cream-colored_courser'}, {'id': 3188, 'synset': 'crocodile_bird.n.01', 'name': 'crocodile_bird'}, {'id': 3189, 'synset': 'stone_curlew.n.01', 'name': 'stone_curlew'}, {'id': 3190, 'synset': 'coastal_diving_bird.n.01', 'name': 'coastal_diving_bird'}, {'id': 3191, 'synset': 'larid.n.01', 'name': 'larid'}, {'id': 3192, 'synset': 'mew.n.02', 'name': 'mew'}, {'id': 3193, 'synset': 'black-backed_gull.n.01', 'name': 'black-backed_gull'}, {'id': 3194, 'synset': 'herring_gull.n.01', 'name': 'herring_gull'}, {'id': 3195, 'synset': 'laughing_gull.n.01', 'name': 'laughing_gull'}, {'id': 3196, 'synset': 'ivory_gull.n.01', 'name': 'ivory_gull'}, {'id': 3197, 'synset': 'kittiwake.n.01', 'name': 'kittiwake'}, {'id': 3198, 'synset': 'tern.n.01', 'name': 'tern'}, {'id': 3199, 'synset': 'sea_swallow.n.01', 'name': 'sea_swallow'}, {'id': 3200, 'synset': 'skimmer.n.04', 'name': 'skimmer'}, {'id': 3201, 'synset': 'jaeger.n.01', 'name': 'jaeger'}, {'id': 3202, 'synset': 'parasitic_jaeger.n.01', 'name': 'parasitic_jaeger'}, {'id': 3203, 'synset': 'skua.n.01', 'name': 'skua'}, {'id': 3204, 'synset': 'great_skua.n.01', 'name': 'great_skua'}, {'id': 3205, 'synset': 'auk.n.01', 'name': 'auk'}, {'id': 3206, 'synset': 'auklet.n.01', 'name': 'auklet'}, {'id': 3207, 'synset': 'razorbill.n.01', 'name': 'razorbill'}, {'id': 3208, 'synset': 'little_auk.n.01', 'name': 'little_auk'}, {'id': 3209, 'synset': 'guillemot.n.01', 'name': 'guillemot'}, {'id': 3210, 'synset': 'black_guillemot.n.01', 'name': 'black_guillemot'}, {'id': 3211, 'synset': 'pigeon_guillemot.n.01', 'name': 'pigeon_guillemot'}, {'id': 3212, 'synset': 'murre.n.01', 'name': 'murre'}, {'id': 3213, 'synset': 'common_murre.n.01', 'name': 'common_murre'}, {'id': 3214, 'synset': 
'thick-billed_murre.n.01', 'name': 'thick-billed_murre'}, {'id': 3215, 'synset': 'atlantic_puffin.n.01', 'name': 'Atlantic_puffin'}, {'id': 3216, 'synset': 'horned_puffin.n.01', 'name': 'horned_puffin'}, {'id': 3217, 'synset': 'tufted_puffin.n.01', 'name': 'tufted_puffin'}, {'id': 3218, 'synset': 'gaviiform_seabird.n.01', 'name': 'gaviiform_seabird'}, {'id': 3219, 'synset': 'loon.n.02', 'name': 'loon'}, {'id': 3220, 'synset': 'podicipitiform_seabird.n.01', 'name': 'podicipitiform_seabird'}, {'id': 3221, 'synset': 'grebe.n.01', 'name': 'grebe'}, {'id': 3222, 'synset': 'great_crested_grebe.n.01', 'name': 'great_crested_grebe'}, {'id': 3223, 'synset': 'red-necked_grebe.n.01', 'name': 'red-necked_grebe'}, {'id': 3224, 'synset': 'black-necked_grebe.n.01', 'name': 'black-necked_grebe'}, {'id': 3225, 'synset': 'dabchick.n.01', 'name': 'dabchick'}, {'id': 3226, 'synset': 'pied-billed_grebe.n.01', 'name': 'pied-billed_grebe'}, {'id': 3227, 'synset': 'pelecaniform_seabird.n.01', 'name': 'pelecaniform_seabird'}, {'id': 3228, 'synset': 'white_pelican.n.01', 'name': 'white_pelican'}, {'id': 3229, 'synset': 'old_world_white_pelican.n.01', 'name': 'Old_world_white_pelican'}, {'id': 3230, 'synset': 'frigate_bird.n.01', 'name': 'frigate_bird'}, {'id': 3231, 'synset': 'gannet.n.01', 'name': 'gannet'}, {'id': 3232, 'synset': 'solan.n.01', 'name': 'solan'}, {'id': 3233, 'synset': 'booby.n.02', 'name': 'booby'}, {'id': 3234, 'synset': 'cormorant.n.01', 'name': 'cormorant'}, {'id': 3235, 'synset': 'snakebird.n.01', 'name': 'snakebird'}, {'id': 3236, 'synset': 'water_turkey.n.01', 'name': 'water_turkey'}, {'id': 3237, 'synset': 'tropic_bird.n.01', 'name': 'tropic_bird'}, {'id': 3238, 'synset': 'sphenisciform_seabird.n.01', 'name': 'sphenisciform_seabird'}, {'id': 3239, 'synset': 'adelie.n.01', 'name': 'Adelie'}, {'id': 3240, 'synset': 'king_penguin.n.01', 'name': 'king_penguin'}, {'id': 3241, 'synset': 'emperor_penguin.n.01', 'name': 'emperor_penguin'}, {'id': 3242, 'synset': 'jackass_penguin.n.01', 'name': 'jackass_penguin'}, {'id': 3243, 'synset': 'rock_hopper.n.01', 'name': 'rock_hopper'}, {'id': 3244, 'synset': 'pelagic_bird.n.01', 'name': 'pelagic_bird'}, {'id': 3245, 'synset': 'procellariiform_seabird.n.01', 'name': 'procellariiform_seabird'}, {'id': 3246, 'synset': 'albatross.n.02', 'name': 'albatross'}, {'id': 3247, 'synset': 'wandering_albatross.n.01', 'name': 'wandering_albatross'}, {'id': 3248, 'synset': 'black-footed_albatross.n.01', 'name': 'black-footed_albatross'}, {'id': 3249, 'synset': 'petrel.n.01', 'name': 'petrel'}, {'id': 3250, 'synset': 'white-chinned_petrel.n.01', 'name': 'white-chinned_petrel'}, {'id': 3251, 'synset': 'giant_petrel.n.01', 'name': 'giant_petrel'}, {'id': 3252, 'synset': 'fulmar.n.01', 'name': 'fulmar'}, {'id': 3253, 'synset': 'shearwater.n.01', 'name': 'shearwater'}, {'id': 3254, 'synset': 'manx_shearwater.n.01', 'name': 'Manx_shearwater'}, {'id': 3255, 'synset': 'storm_petrel.n.01', 'name': 'storm_petrel'}, {'id': 3256, 'synset': 'stormy_petrel.n.01', 'name': 'stormy_petrel'}, {'id': 3257, 'synset': "mother_carey's_chicken.n.01", 'name': "Mother_Carey's_chicken"}, {'id': 3258, 'synset': 'diving_petrel.n.01', 'name': 'diving_petrel'}, {'id': 3259, 'synset': 'aquatic_mammal.n.01', 'name': 'aquatic_mammal'}, {'id': 3260, 'synset': 'cetacean.n.01', 'name': 'cetacean'}, {'id': 3261, 'synset': 'whale.n.02', 'name': 'whale'}, {'id': 3262, 'synset': 'baleen_whale.n.01', 'name': 'baleen_whale'}, {'id': 3263, 'synset': 'right_whale.n.01', 'name': 'right_whale'}, {'id': 3264, 
'synset': 'bowhead.n.01', 'name': 'bowhead'}, {'id': 3265, 'synset': 'rorqual.n.01', 'name': 'rorqual'}, {'id': 3266, 'synset': 'blue_whale.n.01', 'name': 'blue_whale'}, {'id': 3267, 'synset': 'finback.n.01', 'name': 'finback'}, {'id': 3268, 'synset': 'sei_whale.n.01', 'name': 'sei_whale'}, {'id': 3269, 'synset': 'lesser_rorqual.n.01', 'name': 'lesser_rorqual'}, {'id': 3270, 'synset': 'humpback.n.03', 'name': 'humpback'}, {'id': 3271, 'synset': 'grey_whale.n.01', 'name': 'grey_whale'}, {'id': 3272, 'synset': 'toothed_whale.n.01', 'name': 'toothed_whale'}, {'id': 3273, 'synset': 'sperm_whale.n.01', 'name': 'sperm_whale'}, {'id': 3274, 'synset': 'pygmy_sperm_whale.n.01', 'name': 'pygmy_sperm_whale'}, {'id': 3275, 'synset': 'dwarf_sperm_whale.n.01', 'name': 'dwarf_sperm_whale'}, {'id': 3276, 'synset': 'beaked_whale.n.01', 'name': 'beaked_whale'}, {'id': 3277, 'synset': 'bottle-nosed_whale.n.01', 'name': 'bottle-nosed_whale'}, {'id': 3278, 'synset': 'common_dolphin.n.01', 'name': 'common_dolphin'}, {'id': 3279, 'synset': 'bottlenose_dolphin.n.01', 'name': 'bottlenose_dolphin'}, {'id': 3280, 'synset': 'atlantic_bottlenose_dolphin.n.01', 'name': 'Atlantic_bottlenose_dolphin'}, {'id': 3281, 'synset': 'pacific_bottlenose_dolphin.n.01', 'name': 'Pacific_bottlenose_dolphin'}, {'id': 3282, 'synset': 'porpoise.n.01', 'name': 'porpoise'}, {'id': 3283, 'synset': 'harbor_porpoise.n.01', 'name': 'harbor_porpoise'}, {'id': 3284, 'synset': 'vaquita.n.01', 'name': 'vaquita'}, {'id': 3285, 'synset': 'grampus.n.02', 'name': 'grampus'}, {'id': 3286, 'synset': 'killer_whale.n.01', 'name': 'killer_whale'}, {'id': 3287, 'synset': 'pilot_whale.n.01', 'name': 'pilot_whale'}, {'id': 3288, 'synset': 'river_dolphin.n.01', 'name': 'river_dolphin'}, {'id': 3289, 'synset': 'narwhal.n.01', 'name': 'narwhal'}, {'id': 3290, 'synset': 'white_whale.n.01', 'name': 'white_whale'}, {'id': 3291, 'synset': 'sea_cow.n.01', 'name': 'sea_cow'}, {'id': 3292, 'synset': 'dugong.n.01', 'name': 'dugong'}, {'id': 3293, 'synset': "steller's_sea_cow.n.01", 'name': "Steller's_sea_cow"}, {'id': 3294, 'synset': 'carnivore.n.01', 'name': 'carnivore'}, {'id': 3295, 'synset': 'omnivore.n.02', 'name': 'omnivore'}, {'id': 3296, 'synset': 'pinniped_mammal.n.01', 'name': 'pinniped_mammal'}, {'id': 3297, 'synset': 'seal.n.09', 'name': 'seal'}, {'id': 3298, 'synset': 'crabeater_seal.n.01', 'name': 'crabeater_seal'}, {'id': 3299, 'synset': 'eared_seal.n.01', 'name': 'eared_seal'}, {'id': 3300, 'synset': 'fur_seal.n.02', 'name': 'fur_seal'}, {'id': 3301, 'synset': 'guadalupe_fur_seal.n.01', 'name': 'guadalupe_fur_seal'}, {'id': 3302, 'synset': 'fur_seal.n.01', 'name': 'fur_seal'}, {'id': 3303, 'synset': 'alaska_fur_seal.n.01', 'name': 'Alaska_fur_seal'}, {'id': 3304, 'synset': 'sea_lion.n.01', 'name': 'sea_lion'}, {'id': 3305, 'synset': 'south_american_sea_lion.n.01', 'name': 'South_American_sea_lion'}, {'id': 3306, 'synset': 'california_sea_lion.n.01', 'name': 'California_sea_lion'}, {'id': 3307, 'synset': 'australian_sea_lion.n.01', 'name': 'Australian_sea_lion'}, {'id': 3308, 'synset': 'steller_sea_lion.n.01', 'name': 'Steller_sea_lion'}, {'id': 3309, 'synset': 'earless_seal.n.01', 'name': 'earless_seal'}, {'id': 3310, 'synset': 'harbor_seal.n.01', 'name': 'harbor_seal'}, {'id': 3311, 'synset': 'harp_seal.n.01', 'name': 'harp_seal'}, {'id': 3312, 'synset': 'elephant_seal.n.01', 'name': 'elephant_seal'}, {'id': 3313, 'synset': 'bearded_seal.n.01', 'name': 'bearded_seal'}, {'id': 3314, 'synset': 'hooded_seal.n.01', 'name': 'hooded_seal'}, {'id': 3315, 
'synset': 'atlantic_walrus.n.01', 'name': 'Atlantic_walrus'}, {'id': 3316, 'synset': 'pacific_walrus.n.01', 'name': 'Pacific_walrus'}, {'id': 3317, 'synset': 'fissipedia.n.01', 'name': 'Fissipedia'}, {'id': 3318, 'synset': 'fissiped_mammal.n.01', 'name': 'fissiped_mammal'}, {'id': 3319, 'synset': 'aardvark.n.01', 'name': 'aardvark'}, {'id': 3320, 'synset': 'canine.n.02', 'name': 'canine'}, {'id': 3321, 'synset': 'bitch.n.04', 'name': 'bitch'}, {'id': 3322, 'synset': 'brood_bitch.n.01', 'name': 'brood_bitch'}, {'id': 3323, 'synset': 'pooch.n.01', 'name': 'pooch'}, {'id': 3324, 'synset': 'cur.n.01', 'name': 'cur'}, {'id': 3325, 'synset': 'feist.n.01', 'name': 'feist'}, {'id': 3326, 'synset': 'pariah_dog.n.01', 'name': 'pariah_dog'}, {'id': 3327, 'synset': 'lapdog.n.01', 'name': 'lapdog'}, {'id': 3328, 'synset': 'toy_dog.n.01', 'name': 'toy_dog'}, {'id': 3329, 'synset': 'chihuahua.n.03', 'name': 'Chihuahua'}, {'id': 3330, 'synset': 'japanese_spaniel.n.01', 'name': 'Japanese_spaniel'}, {'id': 3331, 'synset': 'maltese_dog.n.01', 'name': 'Maltese_dog'}, {'id': 3332, 'synset': 'pekinese.n.01', 'name': 'Pekinese'}, {'id': 3333, 'synset': 'shih-tzu.n.01', 'name': 'Shih-Tzu'}, {'id': 3334, 'synset': 'toy_spaniel.n.01', 'name': 'toy_spaniel'}, {'id': 3335, 'synset': 'english_toy_spaniel.n.01', 'name': 'English_toy_spaniel'}, {'id': 3336, 'synset': 'blenheim_spaniel.n.01', 'name': 'Blenheim_spaniel'}, {'id': 3337, 'synset': 'king_charles_spaniel.n.01', 'name': 'King_Charles_spaniel'}, {'id': 3338, 'synset': 'papillon.n.01', 'name': 'papillon'}, {'id': 3339, 'synset': 'toy_terrier.n.01', 'name': 'toy_terrier'}, {'id': 3340, 'synset': 'hunting_dog.n.01', 'name': 'hunting_dog'}, {'id': 3341, 'synset': 'courser.n.03', 'name': 'courser'}, {'id': 3342, 'synset': 'rhodesian_ridgeback.n.01', 'name': 'Rhodesian_ridgeback'}, {'id': 3343, 'synset': 'hound.n.01', 'name': 'hound'}, {'id': 3344, 'synset': 'afghan_hound.n.01', 'name': 'Afghan_hound'}, {'id': 3345, 'synset': 'basset.n.01', 'name': 'basset'}, {'id': 3346, 'synset': 'beagle.n.01', 'name': 'beagle'}, {'id': 3347, 'synset': 'bloodhound.n.01', 'name': 'bloodhound'}, {'id': 3348, 'synset': 'bluetick.n.01', 'name': 'bluetick'}, {'id': 3349, 'synset': 'boarhound.n.01', 'name': 'boarhound'}, {'id': 3350, 'synset': 'coonhound.n.01', 'name': 'coonhound'}, {'id': 3351, 'synset': 'coondog.n.01', 'name': 'coondog'}, {'id': 3352, 'synset': 'black-and-tan_coonhound.n.01', 'name': 'black-and-tan_coonhound'}, {'id': 3353, 'synset': 'dachshund.n.01', 'name': 'dachshund'}, {'id': 3354, 'synset': 'sausage_dog.n.01', 'name': 'sausage_dog'}, {'id': 3355, 'synset': 'foxhound.n.01', 'name': 'foxhound'}, {'id': 3356, 'synset': 'american_foxhound.n.01', 'name': 'American_foxhound'}, {'id': 3357, 'synset': 'walker_hound.n.01', 'name': 'Walker_hound'}, {'id': 3358, 'synset': 'english_foxhound.n.01', 'name': 'English_foxhound'}, {'id': 3359, 'synset': 'harrier.n.02', 'name': 'harrier'}, {'id': 3360, 'synset': 'plott_hound.n.01', 'name': 'Plott_hound'}, {'id': 3361, 'synset': 'redbone.n.01', 'name': 'redbone'}, {'id': 3362, 'synset': 'wolfhound.n.01', 'name': 'wolfhound'}, {'id': 3363, 'synset': 'borzoi.n.01', 'name': 'borzoi'}, {'id': 3364, 'synset': 'irish_wolfhound.n.01', 'name': 'Irish_wolfhound'}, {'id': 3365, 'synset': 'greyhound.n.01', 'name': 'greyhound'}, {'id': 3366, 'synset': 'italian_greyhound.n.01', 'name': 'Italian_greyhound'}, {'id': 3367, 'synset': 'whippet.n.01', 'name': 'whippet'}, {'id': 3368, 'synset': 'ibizan_hound.n.01', 'name': 'Ibizan_hound'}, {'id': 3369, 
'synset': 'norwegian_elkhound.n.01', 'name': 'Norwegian_elkhound'}, {'id': 3370, 'synset': 'otterhound.n.01', 'name': 'otterhound'}, {'id': 3371, 'synset': 'saluki.n.01', 'name': 'Saluki'}, {'id': 3372, 'synset': 'scottish_deerhound.n.01', 'name': 'Scottish_deerhound'}, {'id': 3373, 'synset': 'staghound.n.01', 'name': 'staghound'}, {'id': 3374, 'synset': 'weimaraner.n.01', 'name': 'Weimaraner'}, {'id': 3375, 'synset': 'terrier.n.01', 'name': 'terrier'}, {'id': 3376, 'synset': 'bullterrier.n.01', 'name': 'bullterrier'}, {'id': 3377, 'synset': 'staffordshire_bullterrier.n.01', 'name': 'Staffordshire_bullterrier'}, {'id': 3378, 'synset': 'american_staffordshire_terrier.n.01', 'name': 'American_Staffordshire_terrier'}, {'id': 3379, 'synset': 'bedlington_terrier.n.01', 'name': 'Bedlington_terrier'}, {'id': 3380, 'synset': 'border_terrier.n.01', 'name': 'Border_terrier'}, {'id': 3381, 'synset': 'kerry_blue_terrier.n.01', 'name': 'Kerry_blue_terrier'}, {'id': 3382, 'synset': 'irish_terrier.n.01', 'name': 'Irish_terrier'}, {'id': 3383, 'synset': 'norfolk_terrier.n.01', 'name': 'Norfolk_terrier'}, {'id': 3384, 'synset': 'norwich_terrier.n.01', 'name': 'Norwich_terrier'}, {'id': 3385, 'synset': 'yorkshire_terrier.n.01', 'name': 'Yorkshire_terrier'}, {'id': 3386, 'synset': 'rat_terrier.n.01', 'name': 'rat_terrier'}, {'id': 3387, 'synset': 'manchester_terrier.n.01', 'name': 'Manchester_terrier'}, {'id': 3388, 'synset': 'toy_manchester.n.01', 'name': 'toy_Manchester'}, {'id': 3389, 'synset': 'fox_terrier.n.01', 'name': 'fox_terrier'}, {'id': 3390, 'synset': 'smooth-haired_fox_terrier.n.01', 'name': 'smooth-haired_fox_terrier'}, {'id': 3391, 'synset': 'wire-haired_fox_terrier.n.01', 'name': 'wire-haired_fox_terrier'}, {'id': 3392, 'synset': 'wirehair.n.01', 'name': 'wirehair'}, {'id': 3393, 'synset': 'lakeland_terrier.n.01', 'name': 'Lakeland_terrier'}, {'id': 3394, 'synset': 'welsh_terrier.n.01', 'name': 'Welsh_terrier'}, {'id': 3395, 'synset': 'sealyham_terrier.n.01', 'name': 'Sealyham_terrier'}, {'id': 3396, 'synset': 'airedale.n.01', 'name': 'Airedale'}, {'id': 3397, 'synset': 'cairn.n.02', 'name': 'cairn'}, {'id': 3398, 'synset': 'australian_terrier.n.01', 'name': 'Australian_terrier'}, {'id': 3399, 'synset': 'dandie_dinmont.n.01', 'name': 'Dandie_Dinmont'}, {'id': 3400, 'synset': 'boston_bull.n.01', 'name': 'Boston_bull'}, {'id': 3401, 'synset': 'schnauzer.n.01', 'name': 'schnauzer'}, {'id': 3402, 'synset': 'miniature_schnauzer.n.01', 'name': 'miniature_schnauzer'}, {'id': 3403, 'synset': 'giant_schnauzer.n.01', 'name': 'giant_schnauzer'}, {'id': 3404, 'synset': 'standard_schnauzer.n.01', 'name': 'standard_schnauzer'}, {'id': 3405, 'synset': 'scotch_terrier.n.01', 'name': 'Scotch_terrier'}, {'id': 3406, 'synset': 'tibetan_terrier.n.01', 'name': 'Tibetan_terrier'}, {'id': 3407, 'synset': 'silky_terrier.n.01', 'name': 'silky_terrier'}, {'id': 3408, 'synset': 'skye_terrier.n.01', 'name': 'Skye_terrier'}, {'id': 3409, 'synset': 'clydesdale_terrier.n.01', 'name': 'Clydesdale_terrier'}, {'id': 3410, 'synset': 'soft-coated_wheaten_terrier.n.01', 'name': 'soft-coated_wheaten_terrier'}, {'id': 3411, 'synset': 'west_highland_white_terrier.n.01', 'name': 'West_Highland_white_terrier'}, {'id': 3412, 'synset': 'lhasa.n.02', 'name': 'Lhasa'}, {'id': 3413, 'synset': 'sporting_dog.n.01', 'name': 'sporting_dog'}, {'id': 3414, 'synset': 'bird_dog.n.01', 'name': 'bird_dog'}, {'id': 3415, 'synset': 'water_dog.n.02', 'name': 'water_dog'}, {'id': 3416, 'synset': 'retriever.n.01', 'name': 'retriever'}, {'id': 3417, 
'synset': 'flat-coated_retriever.n.01', 'name': 'flat-coated_retriever'}, {'id': 3418, 'synset': 'curly-coated_retriever.n.01', 'name': 'curly-coated_retriever'}, {'id': 3419, 'synset': 'golden_retriever.n.01', 'name': 'golden_retriever'}, {'id': 3420, 'synset': 'labrador_retriever.n.01', 'name': 'Labrador_retriever'}, {'id': 3421, 'synset': 'chesapeake_bay_retriever.n.01', 'name': 'Chesapeake_Bay_retriever'}, {'id': 3422, 'synset': 'pointer.n.04', 'name': 'pointer'}, {'id': 3423, 'synset': 'german_short-haired_pointer.n.01', 'name': 'German_short-haired_pointer'}, {'id': 3424, 'synset': 'setter.n.02', 'name': 'setter'}, {'id': 3425, 'synset': 'vizsla.n.01', 'name': 'vizsla'}, {'id': 3426, 'synset': 'english_setter.n.01', 'name': 'English_setter'}, {'id': 3427, 'synset': 'irish_setter.n.01', 'name': 'Irish_setter'}, {'id': 3428, 'synset': 'gordon_setter.n.01', 'name': 'Gordon_setter'}, {'id': 3429, 'synset': 'spaniel.n.01', 'name': 'spaniel'}, {'id': 3430, 'synset': 'brittany_spaniel.n.01', 'name': 'Brittany_spaniel'}, {'id': 3431, 'synset': 'clumber.n.01', 'name': 'clumber'}, {'id': 3432, 'synset': 'field_spaniel.n.01', 'name': 'field_spaniel'}, {'id': 3433, 'synset': 'springer_spaniel.n.01', 'name': 'springer_spaniel'}, {'id': 3434, 'synset': 'english_springer.n.01', 'name': 'English_springer'}, {'id': 3435, 'synset': 'welsh_springer_spaniel.n.01', 'name': 'Welsh_springer_spaniel'}, {'id': 3436, 'synset': 'cocker_spaniel.n.01', 'name': 'cocker_spaniel'}, {'id': 3437, 'synset': 'sussex_spaniel.n.01', 'name': 'Sussex_spaniel'}, {'id': 3438, 'synset': 'water_spaniel.n.01', 'name': 'water_spaniel'}, {'id': 3439, 'synset': 'american_water_spaniel.n.01', 'name': 'American_water_spaniel'}, {'id': 3440, 'synset': 'irish_water_spaniel.n.01', 'name': 'Irish_water_spaniel'}, {'id': 3441, 'synset': 'griffon.n.03', 'name': 'griffon'}, {'id': 3442, 'synset': 'working_dog.n.01', 'name': 'working_dog'}, {'id': 3443, 'synset': 'watchdog.n.02', 'name': 'watchdog'}, {'id': 3444, 'synset': 'kuvasz.n.01', 'name': 'kuvasz'}, {'id': 3445, 'synset': 'attack_dog.n.01', 'name': 'attack_dog'}, {'id': 3446, 'synset': 'housedog.n.01', 'name': 'housedog'}, {'id': 3447, 'synset': 'schipperke.n.01', 'name': 'schipperke'}, {'id': 3448, 'synset': 'belgian_sheepdog.n.01', 'name': 'Belgian_sheepdog'}, {'id': 3449, 'synset': 'groenendael.n.01', 'name': 'groenendael'}, {'id': 3450, 'synset': 'malinois.n.01', 'name': 'malinois'}, {'id': 3451, 'synset': 'briard.n.01', 'name': 'briard'}, {'id': 3452, 'synset': 'kelpie.n.02', 'name': 'kelpie'}, {'id': 3453, 'synset': 'komondor.n.01', 'name': 'komondor'}, {'id': 3454, 'synset': 'old_english_sheepdog.n.01', 'name': 'Old_English_sheepdog'}, {'id': 3455, 'synset': 'shetland_sheepdog.n.01', 'name': 'Shetland_sheepdog'}, {'id': 3456, 'synset': 'collie.n.01', 'name': 'collie'}, {'id': 3457, 'synset': 'border_collie.n.01', 'name': 'Border_collie'}, {'id': 3458, 'synset': 'bouvier_des_flandres.n.01', 'name': 'Bouvier_des_Flandres'}, {'id': 3459, 'synset': 'rottweiler.n.01', 'name': 'Rottweiler'}, {'id': 3460, 'synset': 'german_shepherd.n.01', 'name': 'German_shepherd'}, {'id': 3461, 'synset': 'police_dog.n.01', 'name': 'police_dog'}, {'id': 3462, 'synset': 'pinscher.n.01', 'name': 'pinscher'}, {'id': 3463, 'synset': 'doberman.n.01', 'name': 'Doberman'}, {'id': 3464, 'synset': 'miniature_pinscher.n.01', 'name': 'miniature_pinscher'}, {'id': 3465, 'synset': 'sennenhunde.n.01', 'name': 'Sennenhunde'}, {'id': 3466, 'synset': 'greater_swiss_mountain_dog.n.01', 'name': 
'Greater_Swiss_Mountain_dog'}, {'id': 3467, 'synset': 'bernese_mountain_dog.n.01', 'name': 'Bernese_mountain_dog'}, {'id': 3468, 'synset': 'appenzeller.n.01', 'name': 'Appenzeller'}, {'id': 3469, 'synset': 'entlebucher.n.01', 'name': 'EntleBucher'}, {'id': 3470, 'synset': 'boxer.n.04', 'name': 'boxer'}, {'id': 3471, 'synset': 'mastiff.n.01', 'name': 'mastiff'}, {'id': 3472, 'synset': 'bull_mastiff.n.01', 'name': 'bull_mastiff'}, {'id': 3473, 'synset': 'tibetan_mastiff.n.01', 'name': 'Tibetan_mastiff'}, {'id': 3474, 'synset': 'french_bulldog.n.01', 'name': 'French_bulldog'}, {'id': 3475, 'synset': 'great_dane.n.01', 'name': 'Great_Dane'}, {'id': 3476, 'synset': 'guide_dog.n.01', 'name': 'guide_dog'}, {'id': 3477, 'synset': 'seeing_eye_dog.n.01', 'name': 'Seeing_Eye_dog'}, {'id': 3478, 'synset': 'hearing_dog.n.01', 'name': 'hearing_dog'}, {'id': 3479, 'synset': 'saint_bernard.n.01', 'name': 'Saint_Bernard'}, {'id': 3480, 'synset': 'seizure-alert_dog.n.01', 'name': 'seizure-alert_dog'}, {'id': 3481, 'synset': 'sled_dog.n.01', 'name': 'sled_dog'}, {'id': 3482, 'synset': 'eskimo_dog.n.01', 'name': 'Eskimo_dog'}, {'id': 3483, 'synset': 'malamute.n.01', 'name': 'malamute'}, {'id': 3484, 'synset': 'siberian_husky.n.01', 'name': 'Siberian_husky'}, {'id': 3485, 'synset': 'liver-spotted_dalmatian.n.01', 'name': 'liver-spotted_dalmatian'}, {'id': 3486, 'synset': 'affenpinscher.n.01', 'name': 'affenpinscher'}, {'id': 3487, 'synset': 'basenji.n.01', 'name': 'basenji'}, {'id': 3488, 'synset': 'leonberg.n.01', 'name': 'Leonberg'}, {'id': 3489, 'synset': 'newfoundland.n.01', 'name': 'Newfoundland'}, {'id': 3490, 'synset': 'great_pyrenees.n.01', 'name': 'Great_Pyrenees'}, {'id': 3491, 'synset': 'spitz.n.01', 'name': 'spitz'}, {'id': 3492, 'synset': 'samoyed.n.03', 'name': 'Samoyed'}, {'id': 3493, 'synset': 'pomeranian.n.01', 'name': 'Pomeranian'}, {'id': 3494, 'synset': 'chow.n.03', 'name': 'chow'}, {'id': 3495, 'synset': 'keeshond.n.01', 'name': 'keeshond'}, {'id': 3496, 'synset': 'griffon.n.02', 'name': 'griffon'}, {'id': 3497, 'synset': 'brabancon_griffon.n.01', 'name': 'Brabancon_griffon'}, {'id': 3498, 'synset': 'corgi.n.01', 'name': 'corgi'}, {'id': 3499, 'synset': 'pembroke.n.01', 'name': 'Pembroke'}, {'id': 3500, 'synset': 'cardigan.n.02', 'name': 'Cardigan'}, {'id': 3501, 'synset': 'poodle.n.01', 'name': 'poodle'}, {'id': 3502, 'synset': 'toy_poodle.n.01', 'name': 'toy_poodle'}, {'id': 3503, 'synset': 'miniature_poodle.n.01', 'name': 'miniature_poodle'}, {'id': 3504, 'synset': 'standard_poodle.n.01', 'name': 'standard_poodle'}, {'id': 3505, 'synset': 'large_poodle.n.01', 'name': 'large_poodle'}, {'id': 3506, 'synset': 'mexican_hairless.n.01', 'name': 'Mexican_hairless'}, {'id': 3507, 'synset': 'timber_wolf.n.01', 'name': 'timber_wolf'}, {'id': 3508, 'synset': 'white_wolf.n.01', 'name': 'white_wolf'}, {'id': 3509, 'synset': 'red_wolf.n.01', 'name': 'red_wolf'}, {'id': 3510, 'synset': 'coyote.n.01', 'name': 'coyote'}, {'id': 3511, 'synset': 'coydog.n.01', 'name': 'coydog'}, {'id': 3512, 'synset': 'jackal.n.01', 'name': 'jackal'}, {'id': 3513, 'synset': 'wild_dog.n.01', 'name': 'wild_dog'}, {'id': 3514, 'synset': 'dingo.n.01', 'name': 'dingo'}, {'id': 3515, 'synset': 'dhole.n.01', 'name': 'dhole'}, {'id': 3516, 'synset': 'crab-eating_dog.n.01', 'name': 'crab-eating_dog'}, {'id': 3517, 'synset': 'raccoon_dog.n.01', 'name': 'raccoon_dog'}, {'id': 3518, 'synset': 'african_hunting_dog.n.01', 'name': 'African_hunting_dog'}, {'id': 3519, 'synset': 'hyena.n.01', 'name': 'hyena'}, {'id': 3520, 'synset': 
'striped_hyena.n.01', 'name': 'striped_hyena'}, {'id': 3521, 'synset': 'brown_hyena.n.01', 'name': 'brown_hyena'}, {'id': 3522, 'synset': 'spotted_hyena.n.01', 'name': 'spotted_hyena'}, {'id': 3523, 'synset': 'aardwolf.n.01', 'name': 'aardwolf'}, {'id': 3524, 'synset': 'fox.n.01', 'name': 'fox'}, {'id': 3525, 'synset': 'vixen.n.02', 'name': 'vixen'}, {'id': 3526, 'synset': 'reynard.n.01', 'name': 'Reynard'}, {'id': 3527, 'synset': 'red_fox.n.03', 'name': 'red_fox'}, {'id': 3528, 'synset': 'black_fox.n.01', 'name': 'black_fox'}, {'id': 3529, 'synset': 'silver_fox.n.01', 'name': 'silver_fox'}, {'id': 3530, 'synset': 'red_fox.n.02', 'name': 'red_fox'}, {'id': 3531, 'synset': 'kit_fox.n.02', 'name': 'kit_fox'}, {'id': 3532, 'synset': 'kit_fox.n.01', 'name': 'kit_fox'}, {'id': 3533, 'synset': 'arctic_fox.n.01', 'name': 'Arctic_fox'}, {'id': 3534, 'synset': 'blue_fox.n.01', 'name': 'blue_fox'}, {'id': 3535, 'synset': 'grey_fox.n.01', 'name': 'grey_fox'}, {'id': 3536, 'synset': 'feline.n.01', 'name': 'feline'}, {'id': 3537, 'synset': 'domestic_cat.n.01', 'name': 'domestic_cat'}, {'id': 3538, 'synset': 'kitty.n.04', 'name': 'kitty'}, {'id': 3539, 'synset': 'mouser.n.01', 'name': 'mouser'}, {'id': 3540, 'synset': 'alley_cat.n.01', 'name': 'alley_cat'}, {'id': 3541, 'synset': 'stray.n.01', 'name': 'stray'}, {'id': 3542, 'synset': 'tom.n.02', 'name': 'tom'}, {'id': 3543, 'synset': 'gib.n.02', 'name': 'gib'}, {'id': 3544, 'synset': 'tabby.n.02', 'name': 'tabby'}, {'id': 3545, 'synset': 'tabby.n.01', 'name': 'tabby'}, {'id': 3546, 'synset': 'tiger_cat.n.02', 'name': 'tiger_cat'}, {'id': 3547, 'synset': 'tortoiseshell.n.03', 'name': 'tortoiseshell'}, {'id': 3548, 'synset': 'persian_cat.n.01', 'name': 'Persian_cat'}, {'id': 3549, 'synset': 'angora.n.04', 'name': 'Angora'}, {'id': 3550, 'synset': 'siamese_cat.n.01', 'name': 'Siamese_cat'}, {'id': 3551, 'synset': 'blue_point_siamese.n.01', 'name': 'blue_point_Siamese'}, {'id': 3552, 'synset': 'burmese_cat.n.01', 'name': 'Burmese_cat'}, {'id': 3553, 'synset': 'egyptian_cat.n.01', 'name': 'Egyptian_cat'}, {'id': 3554, 'synset': 'maltese.n.03', 'name': 'Maltese'}, {'id': 3555, 'synset': 'abyssinian.n.01', 'name': 'Abyssinian'}, {'id': 3556, 'synset': 'manx.n.02', 'name': 'Manx'}, {'id': 3557, 'synset': 'wildcat.n.03', 'name': 'wildcat'}, {'id': 3558, 'synset': 'sand_cat.n.01', 'name': 'sand_cat'}, {'id': 3559, 'synset': 'european_wildcat.n.01', 'name': 'European_wildcat'}, {'id': 3560, 'synset': 'ocelot.n.01', 'name': 'ocelot'}, {'id': 3561, 'synset': 'jaguarundi.n.01', 'name': 'jaguarundi'}, {'id': 3562, 'synset': 'kaffir_cat.n.01', 'name': 'kaffir_cat'}, {'id': 3563, 'synset': 'jungle_cat.n.01', 'name': 'jungle_cat'}, {'id': 3564, 'synset': 'serval.n.01', 'name': 'serval'}, {'id': 3565, 'synset': 'leopard_cat.n.01', 'name': 'leopard_cat'}, {'id': 3566, 'synset': 'margay.n.01', 'name': 'margay'}, {'id': 3567, 'synset': 'manul.n.01', 'name': 'manul'}, {'id': 3568, 'synset': 'lynx.n.02', 'name': 'lynx'}, {'id': 3569, 'synset': 'common_lynx.n.01', 'name': 'common_lynx'}, {'id': 3570, 'synset': 'canada_lynx.n.01', 'name': 'Canada_lynx'}, {'id': 3571, 'synset': 'bobcat.n.01', 'name': 'bobcat'}, {'id': 3572, 'synset': 'spotted_lynx.n.01', 'name': 'spotted_lynx'}, {'id': 3573, 'synset': 'caracal.n.01', 'name': 'caracal'}, {'id': 3574, 'synset': 'big_cat.n.01', 'name': 'big_cat'}, {'id': 3575, 'synset': 'leopard.n.02', 'name': 'leopard'}, {'id': 3576, 'synset': 'leopardess.n.01', 'name': 'leopardess'}, {'id': 3577, 'synset': 'panther.n.02', 'name': 'panther'}, 
{'id': 3578, 'synset': 'snow_leopard.n.01', 'name': 'snow_leopard'}, {'id': 3579, 'synset': 'jaguar.n.01', 'name': 'jaguar'}, {'id': 3580, 'synset': 'lioness.n.01', 'name': 'lioness'}, {'id': 3581, 'synset': 'lionet.n.01', 'name': 'lionet'}, {'id': 3582, 'synset': 'bengal_tiger.n.01', 'name': 'Bengal_tiger'}, {'id': 3583, 'synset': 'tigress.n.01', 'name': 'tigress'}, {'id': 3584, 'synset': 'liger.n.01', 'name': 'liger'}, {'id': 3585, 'synset': 'tiglon.n.01', 'name': 'tiglon'}, {'id': 3586, 'synset': 'cheetah.n.01', 'name': 'cheetah'}, {'id': 3587, 'synset': 'saber-toothed_tiger.n.01', 'name': 'saber-toothed_tiger'}, {'id': 3588, 'synset': 'smiledon_californicus.n.01', 'name': 'Smiledon_californicus'}, {'id': 3589, 'synset': 'brown_bear.n.01', 'name': 'brown_bear'}, {'id': 3590, 'synset': 'bruin.n.01', 'name': 'bruin'}, {'id': 3591, 'synset': 'syrian_bear.n.01', 'name': 'Syrian_bear'}, {'id': 3592, 'synset': 'alaskan_brown_bear.n.01', 'name': 'Alaskan_brown_bear'}, {'id': 3593, 'synset': 'american_black_bear.n.01', 'name': 'American_black_bear'}, {'id': 3594, 'synset': 'cinnamon_bear.n.01', 'name': 'cinnamon_bear'}, {'id': 3595, 'synset': 'asiatic_black_bear.n.01', 'name': 'Asiatic_black_bear'}, {'id': 3596, 'synset': 'sloth_bear.n.01', 'name': 'sloth_bear'}, {'id': 3597, 'synset': 'viverrine.n.01', 'name': 'viverrine'}, {'id': 3598, 'synset': 'civet.n.01', 'name': 'civet'}, {'id': 3599, 'synset': 'large_civet.n.01', 'name': 'large_civet'}, {'id': 3600, 'synset': 'small_civet.n.01', 'name': 'small_civet'}, {'id': 3601, 'synset': 'binturong.n.01', 'name': 'binturong'}, {'id': 3602, 'synset': 'cryptoprocta.n.01', 'name': 'Cryptoprocta'}, {'id': 3603, 'synset': 'fossa.n.03', 'name': 'fossa'}, {'id': 3604, 'synset': 'fanaloka.n.01', 'name': 'fanaloka'}, {'id': 3605, 'synset': 'genet.n.03', 'name': 'genet'}, {'id': 3606, 'synset': 'banded_palm_civet.n.01', 'name': 'banded_palm_civet'}, {'id': 3607, 'synset': 'mongoose.n.01', 'name': 'mongoose'}, {'id': 3608, 'synset': 'indian_mongoose.n.01', 'name': 'Indian_mongoose'}, {'id': 3609, 'synset': 'ichneumon.n.01', 'name': 'ichneumon'}, {'id': 3610, 'synset': 'palm_cat.n.01', 'name': 'palm_cat'}, {'id': 3611, 'synset': 'meerkat.n.01', 'name': 'meerkat'}, {'id': 3612, 'synset': 'slender-tailed_meerkat.n.01', 'name': 'slender-tailed_meerkat'}, {'id': 3613, 'synset': 'suricate.n.01', 'name': 'suricate'}, {'id': 3614, 'synset': 'fruit_bat.n.01', 'name': 'fruit_bat'}, {'id': 3615, 'synset': 'flying_fox.n.01', 'name': 'flying_fox'}, {'id': 3616, 'synset': 'pteropus_capestratus.n.01', 'name': 'Pteropus_capestratus'}, {'id': 3617, 'synset': 'pteropus_hypomelanus.n.01', 'name': 'Pteropus_hypomelanus'}, {'id': 3618, 'synset': 'harpy.n.03', 'name': 'harpy'}, {'id': 3619, 'synset': 'cynopterus_sphinx.n.01', 'name': 'Cynopterus_sphinx'}, {'id': 3620, 'synset': 'carnivorous_bat.n.01', 'name': 'carnivorous_bat'}, {'id': 3621, 'synset': 'mouse-eared_bat.n.01', 'name': 'mouse-eared_bat'}, {'id': 3622, 'synset': 'leafnose_bat.n.01', 'name': 'leafnose_bat'}, {'id': 3623, 'synset': 'macrotus.n.01', 'name': 'macrotus'}, {'id': 3624, 'synset': 'spearnose_bat.n.01', 'name': 'spearnose_bat'}, {'id': 3625, 'synset': 'phyllostomus_hastatus.n.01', 'name': 'Phyllostomus_hastatus'}, {'id': 3626, 'synset': 'hognose_bat.n.01', 'name': 'hognose_bat'}, {'id': 3627, 'synset': 'horseshoe_bat.n.02', 'name': 'horseshoe_bat'}, {'id': 3628, 'synset': 'horseshoe_bat.n.01', 'name': 'horseshoe_bat'}, {'id': 3629, 'synset': 'orange_bat.n.01', 'name': 'orange_bat'}, {'id': 3630, 'synset': 
'false_vampire.n.01', 'name': 'false_vampire'}, {'id': 3631, 'synset': 'big-eared_bat.n.01', 'name': 'big-eared_bat'}, {'id': 3632, 'synset': 'vespertilian_bat.n.01', 'name': 'vespertilian_bat'}, {'id': 3633, 'synset': 'frosted_bat.n.01', 'name': 'frosted_bat'}, {'id': 3634, 'synset': 'red_bat.n.01', 'name': 'red_bat'}, {'id': 3635, 'synset': 'brown_bat.n.01', 'name': 'brown_bat'}, {'id': 3636, 'synset': 'little_brown_bat.n.01', 'name': 'little_brown_bat'}, {'id': 3637, 'synset': 'cave_myotis.n.01', 'name': 'cave_myotis'}, {'id': 3638, 'synset': 'big_brown_bat.n.01', 'name': 'big_brown_bat'}, {'id': 3639, 'synset': 'serotine.n.01', 'name': 'serotine'}, {'id': 3640, 'synset': 'pallid_bat.n.01', 'name': 'pallid_bat'}, {'id': 3641, 'synset': 'pipistrelle.n.01', 'name': 'pipistrelle'}, {'id': 3642, 'synset': 'eastern_pipistrel.n.01', 'name': 'eastern_pipistrel'}, {'id': 3643, 'synset': 'jackass_bat.n.01', 'name': 'jackass_bat'}, {'id': 3644, 'synset': 'long-eared_bat.n.01', 'name': 'long-eared_bat'}, {'id': 3645, 'synset': 'western_big-eared_bat.n.01', 'name': 'western_big-eared_bat'}, {'id': 3646, 'synset': 'freetail.n.01', 'name': 'freetail'}, {'id': 3647, 'synset': 'guano_bat.n.01', 'name': 'guano_bat'}, {'id': 3648, 'synset': 'pocketed_bat.n.01', 'name': 'pocketed_bat'}, {'id': 3649, 'synset': 'mastiff_bat.n.01', 'name': 'mastiff_bat'}, {'id': 3650, 'synset': 'vampire_bat.n.01', 'name': 'vampire_bat'}, {'id': 3651, 'synset': 'desmodus_rotundus.n.01', 'name': 'Desmodus_rotundus'}, {'id': 3652, 'synset': 'hairy-legged_vampire_bat.n.01', 'name': 'hairy-legged_vampire_bat'}, {'id': 3653, 'synset': 'predator.n.02', 'name': 'predator'}, {'id': 3654, 'synset': 'prey.n.02', 'name': 'prey'}, {'id': 3655, 'synset': 'game.n.04', 'name': 'game'}, {'id': 3656, 'synset': 'big_game.n.01', 'name': 'big_game'}, {'id': 3657, 'synset': 'game_bird.n.01', 'name': 'game_bird'}, {'id': 3658, 'synset': 'fossorial_mammal.n.01', 'name': 'fossorial_mammal'}, {'id': 3659, 'synset': 'tetrapod.n.01', 'name': 'tetrapod'}, {'id': 3660, 'synset': 'quadruped.n.01', 'name': 'quadruped'}, {'id': 3661, 'synset': 'hexapod.n.01', 'name': 'hexapod'}, {'id': 3662, 'synset': 'biped.n.01', 'name': 'biped'}, {'id': 3663, 'synset': 'insect.n.01', 'name': 'insect'}, {'id': 3664, 'synset': 'social_insect.n.01', 'name': 'social_insect'}, {'id': 3665, 'synset': 'holometabola.n.01', 'name': 'holometabola'}, {'id': 3666, 'synset': 'defoliator.n.01', 'name': 'defoliator'}, {'id': 3667, 'synset': 'pollinator.n.01', 'name': 'pollinator'}, {'id': 3668, 'synset': 'gallfly.n.03', 'name': 'gallfly'}, {'id': 3669, 'synset': 'scorpion_fly.n.01', 'name': 'scorpion_fly'}, {'id': 3670, 'synset': 'hanging_fly.n.01', 'name': 'hanging_fly'}, {'id': 3671, 'synset': 'collembolan.n.01', 'name': 'collembolan'}, {'id': 3672, 'synset': 'tiger_beetle.n.01', 'name': 'tiger_beetle'}, {'id': 3673, 'synset': 'two-spotted_ladybug.n.01', 'name': 'two-spotted_ladybug'}, {'id': 3674, 'synset': 'mexican_bean_beetle.n.01', 'name': 'Mexican_bean_beetle'}, {'id': 3675, 'synset': 'hippodamia_convergens.n.01', 'name': 'Hippodamia_convergens'}, {'id': 3676, 'synset': 'vedalia.n.01', 'name': 'vedalia'}, {'id': 3677, 'synset': 'ground_beetle.n.01', 'name': 'ground_beetle'}, {'id': 3678, 'synset': 'bombardier_beetle.n.01', 'name': 'bombardier_beetle'}, {'id': 3679, 'synset': 'calosoma.n.01', 'name': 'calosoma'}, {'id': 3680, 'synset': 'searcher.n.03', 'name': 'searcher'}, {'id': 3681, 'synset': 'firefly.n.02', 'name': 'firefly'}, {'id': 3682, 'synset': 'glowworm.n.01', 'name': 
'glowworm'}, {'id': 3683, 'synset': 'long-horned_beetle.n.01', 'name': 'long-horned_beetle'}, {'id': 3684, 'synset': 'sawyer.n.02', 'name': 'sawyer'}, {'id': 3685, 'synset': 'pine_sawyer.n.01', 'name': 'pine_sawyer'}, {'id': 3686, 'synset': 'leaf_beetle.n.01', 'name': 'leaf_beetle'}, {'id': 3687, 'synset': 'flea_beetle.n.01', 'name': 'flea_beetle'}, {'id': 3688, 'synset': 'colorado_potato_beetle.n.01', 'name': 'Colorado_potato_beetle'}, {'id': 3689, 'synset': 'carpet_beetle.n.01', 'name': 'carpet_beetle'}, {'id': 3690, 'synset': 'buffalo_carpet_beetle.n.01', 'name': 'buffalo_carpet_beetle'}, {'id': 3691, 'synset': 'black_carpet_beetle.n.01', 'name': 'black_carpet_beetle'}, {'id': 3692, 'synset': 'clerid_beetle.n.01', 'name': 'clerid_beetle'}, {'id': 3693, 'synset': 'bee_beetle.n.01', 'name': 'bee_beetle'}, {'id': 3694, 'synset': 'lamellicorn_beetle.n.01', 'name': 'lamellicorn_beetle'}, {'id': 3695, 'synset': 'scarabaeid_beetle.n.01', 'name': 'scarabaeid_beetle'}, {'id': 3696, 'synset': 'dung_beetle.n.01', 'name': 'dung_beetle'}, {'id': 3697, 'synset': 'scarab.n.01', 'name': 'scarab'}, {'id': 3698, 'synset': 'tumblebug.n.01', 'name': 'tumblebug'}, {'id': 3699, 'synset': 'dorbeetle.n.01', 'name': 'dorbeetle'}, {'id': 3700, 'synset': 'june_beetle.n.01', 'name': 'June_beetle'}, {'id': 3701, 'synset': 'green_june_beetle.n.01', 'name': 'green_June_beetle'}, {'id': 3702, 'synset': 'japanese_beetle.n.01', 'name': 'Japanese_beetle'}, {'id': 3703, 'synset': 'oriental_beetle.n.01', 'name': 'Oriental_beetle'}, {'id': 3704, 'synset': 'rhinoceros_beetle.n.01', 'name': 'rhinoceros_beetle'}, {'id': 3705, 'synset': 'melolonthid_beetle.n.01', 'name': 'melolonthid_beetle'}, {'id': 3706, 'synset': 'cockchafer.n.01', 'name': 'cockchafer'}, {'id': 3707, 'synset': 'rose_chafer.n.02', 'name': 'rose_chafer'}, {'id': 3708, 'synset': 'rose_chafer.n.01', 'name': 'rose_chafer'}, {'id': 3709, 'synset': 'stag_beetle.n.01', 'name': 'stag_beetle'}, {'id': 3710, 'synset': 'elaterid_beetle.n.01', 'name': 'elaterid_beetle'}, {'id': 3711, 'synset': 'click_beetle.n.01', 'name': 'click_beetle'}, {'id': 3712, 'synset': 'firefly.n.01', 'name': 'firefly'}, {'id': 3713, 'synset': 'wireworm.n.01', 'name': 'wireworm'}, {'id': 3714, 'synset': 'water_beetle.n.01', 'name': 'water_beetle'}, {'id': 3715, 'synset': 'whirligig_beetle.n.01', 'name': 'whirligig_beetle'}, {'id': 3716, 'synset': 'deathwatch_beetle.n.01', 'name': 'deathwatch_beetle'}, {'id': 3717, 'synset': 'weevil.n.01', 'name': 'weevil'}, {'id': 3718, 'synset': 'snout_beetle.n.01', 'name': 'snout_beetle'}, {'id': 3719, 'synset': 'boll_weevil.n.01', 'name': 'boll_weevil'}, {'id': 3720, 'synset': 'blister_beetle.n.01', 'name': 'blister_beetle'}, {'id': 3721, 'synset': 'oil_beetle.n.01', 'name': 'oil_beetle'}, {'id': 3722, 'synset': 'spanish_fly.n.01', 'name': 'Spanish_fly'}, {'id': 3723, 'synset': 'dutch-elm_beetle.n.01', 'name': 'Dutch-elm_beetle'}, {'id': 3724, 'synset': 'bark_beetle.n.01', 'name': 'bark_beetle'}, {'id': 3725, 'synset': 'spruce_bark_beetle.n.01', 'name': 'spruce_bark_beetle'}, {'id': 3726, 'synset': 'rove_beetle.n.01', 'name': 'rove_beetle'}, {'id': 3727, 'synset': 'darkling_beetle.n.01', 'name': 'darkling_beetle'}, {'id': 3728, 'synset': 'mealworm.n.01', 'name': 'mealworm'}, {'id': 3729, 'synset': 'flour_beetle.n.01', 'name': 'flour_beetle'}, {'id': 3730, 'synset': 'seed_beetle.n.01', 'name': 'seed_beetle'}, {'id': 3731, 'synset': 'pea_weevil.n.01', 'name': 'pea_weevil'}, {'id': 3732, 'synset': 'bean_weevil.n.01', 'name': 'bean_weevil'}, {'id': 3733, 
'synset': 'rice_weevil.n.01', 'name': 'rice_weevil'}, {'id': 3734, 'synset': 'asian_longhorned_beetle.n.01', 'name': 'Asian_longhorned_beetle'}, {'id': 3735, 'synset': 'web_spinner.n.01', 'name': 'web_spinner'}, {'id': 3736, 'synset': 'louse.n.01', 'name': 'louse'}, {'id': 3737, 'synset': 'common_louse.n.01', 'name': 'common_louse'}, {'id': 3738, 'synset': 'head_louse.n.01', 'name': 'head_louse'}, {'id': 3739, 'synset': 'body_louse.n.01', 'name': 'body_louse'}, {'id': 3740, 'synset': 'crab_louse.n.01', 'name': 'crab_louse'}, {'id': 3741, 'synset': 'bird_louse.n.01', 'name': 'bird_louse'}, {'id': 3742, 'synset': 'flea.n.01', 'name': 'flea'}, {'id': 3743, 'synset': 'pulex_irritans.n.01', 'name': 'Pulex_irritans'}, {'id': 3744, 'synset': 'dog_flea.n.01', 'name': 'dog_flea'}, {'id': 3745, 'synset': 'cat_flea.n.01', 'name': 'cat_flea'}, {'id': 3746, 'synset': 'chigoe.n.01', 'name': 'chigoe'}, {'id': 3747, 'synset': 'sticktight.n.02', 'name': 'sticktight'}, {'id': 3748, 'synset': 'dipterous_insect.n.01', 'name': 'dipterous_insect'}, {'id': 3749, 'synset': 'gall_midge.n.01', 'name': 'gall_midge'}, {'id': 3750, 'synset': 'hessian_fly.n.01', 'name': 'Hessian_fly'}, {'id': 3751, 'synset': 'fly.n.01', 'name': 'fly'}, {'id': 3752, 'synset': 'housefly.n.01', 'name': 'housefly'}, {'id': 3753, 'synset': 'tsetse_fly.n.01', 'name': 'tsetse_fly'}, {'id': 3754, 'synset': 'blowfly.n.01', 'name': 'blowfly'}, {'id': 3755, 'synset': 'bluebottle.n.02', 'name': 'bluebottle'}, {'id': 3756, 'synset': 'greenbottle.n.01', 'name': 'greenbottle'}, {'id': 3757, 'synset': 'flesh_fly.n.01', 'name': 'flesh_fly'}, {'id': 3758, 'synset': 'tachina_fly.n.01', 'name': 'tachina_fly'}, {'id': 3759, 'synset': 'gadfly.n.02', 'name': 'gadfly'}, {'id': 3760, 'synset': 'botfly.n.01', 'name': 'botfly'}, {'id': 3761, 'synset': 'human_botfly.n.01', 'name': 'human_botfly'}, {'id': 3762, 'synset': 'sheep_botfly.n.01', 'name': 'sheep_botfly'}, {'id': 3763, 'synset': 'warble_fly.n.01', 'name': 'warble_fly'}, {'id': 3764, 'synset': 'horsefly.n.02', 'name': 'horsefly'}, {'id': 3765, 'synset': 'bee_fly.n.01', 'name': 'bee_fly'}, {'id': 3766, 'synset': 'robber_fly.n.01', 'name': 'robber_fly'}, {'id': 3767, 'synset': 'fruit_fly.n.01', 'name': 'fruit_fly'}, {'id': 3768, 'synset': 'apple_maggot.n.01', 'name': 'apple_maggot'}, {'id': 3769, 'synset': 'mediterranean_fruit_fly.n.01', 'name': 'Mediterranean_fruit_fly'}, {'id': 3770, 'synset': 'drosophila.n.01', 'name': 'drosophila'}, {'id': 3771, 'synset': 'vinegar_fly.n.01', 'name': 'vinegar_fly'}, {'id': 3772, 'synset': 'leaf_miner.n.01', 'name': 'leaf_miner'}, {'id': 3773, 'synset': 'louse_fly.n.01', 'name': 'louse_fly'}, {'id': 3774, 'synset': 'horse_tick.n.01', 'name': 'horse_tick'}, {'id': 3775, 'synset': 'sheep_ked.n.01', 'name': 'sheep_ked'}, {'id': 3776, 'synset': 'horn_fly.n.01', 'name': 'horn_fly'}, {'id': 3777, 'synset': 'mosquito.n.01', 'name': 'mosquito'}, {'id': 3778, 'synset': 'wiggler.n.02', 'name': 'wiggler'}, {'id': 3779, 'synset': 'gnat.n.02', 'name': 'gnat'}, {'id': 3780, 'synset': 'yellow-fever_mosquito.n.01', 'name': 'yellow-fever_mosquito'}, {'id': 3781, 'synset': 'asian_tiger_mosquito.n.01', 'name': 'Asian_tiger_mosquito'}, {'id': 3782, 'synset': 'anopheline.n.01', 'name': 'anopheline'}, {'id': 3783, 'synset': 'malarial_mosquito.n.01', 'name': 'malarial_mosquito'}, {'id': 3784, 'synset': 'common_mosquito.n.01', 'name': 'common_mosquito'}, {'id': 3785, 'synset': 'culex_quinquefasciatus.n.01', 'name': 'Culex_quinquefasciatus'}, {'id': 3786, 'synset': 'gnat.n.01', 'name': 'gnat'}, 
{'id': 3787, 'synset': 'punkie.n.01', 'name': 'punkie'}, {'id': 3788, 'synset': 'midge.n.01', 'name': 'midge'}, {'id': 3789, 'synset': 'fungus_gnat.n.02', 'name': 'fungus_gnat'}, {'id': 3790, 'synset': 'psychodid.n.01', 'name': 'psychodid'}, {'id': 3791, 'synset': 'sand_fly.n.01', 'name': 'sand_fly'}, {'id': 3792, 'synset': 'fungus_gnat.n.01', 'name': 'fungus_gnat'}, {'id': 3793, 'synset': 'armyworm.n.03', 'name': 'armyworm'}, {'id': 3794, 'synset': 'crane_fly.n.01', 'name': 'crane_fly'}, {'id': 3795, 'synset': 'blackfly.n.02', 'name': 'blackfly'}, {'id': 3796, 'synset': 'hymenopterous_insect.n.01', 'name': 'hymenopterous_insect'}, {'id': 3797, 'synset': 'bee.n.01', 'name': 'bee'}, {'id': 3798, 'synset': 'drone.n.01', 'name': 'drone'}, {'id': 3799, 'synset': 'queen_bee.n.01', 'name': 'queen_bee'}, {'id': 3800, 'synset': 'worker.n.03', 'name': 'worker'}, {'id': 3801, 'synset': 'soldier.n.02', 'name': 'soldier'}, {'id': 3802, 'synset': 'worker_bee.n.01', 'name': 'worker_bee'}, {'id': 3803, 'synset': 'honeybee.n.01', 'name': 'honeybee'}, {'id': 3804, 'synset': 'africanized_bee.n.01', 'name': 'Africanized_bee'}, {'id': 3805, 'synset': 'black_bee.n.01', 'name': 'black_bee'}, {'id': 3806, 'synset': 'carniolan_bee.n.01', 'name': 'Carniolan_bee'}, {'id': 3807, 'synset': 'italian_bee.n.01', 'name': 'Italian_bee'}, {'id': 3808, 'synset': 'carpenter_bee.n.01', 'name': 'carpenter_bee'}, {'id': 3809, 'synset': 'bumblebee.n.01', 'name': 'bumblebee'}, {'id': 3810, 'synset': 'cuckoo-bumblebee.n.01', 'name': 'cuckoo-bumblebee'}, {'id': 3811, 'synset': 'andrena.n.01', 'name': 'andrena'}, {'id': 3812, 'synset': 'nomia_melanderi.n.01', 'name': 'Nomia_melanderi'}, {'id': 3813, 'synset': 'leaf-cutting_bee.n.01', 'name': 'leaf-cutting_bee'}, {'id': 3814, 'synset': 'mason_bee.n.01', 'name': 'mason_bee'}, {'id': 3815, 'synset': 'potter_bee.n.01', 'name': 'potter_bee'}, {'id': 3816, 'synset': 'wasp.n.02', 'name': 'wasp'}, {'id': 3817, 'synset': 'vespid.n.01', 'name': 'vespid'}, {'id': 3818, 'synset': 'paper_wasp.n.01', 'name': 'paper_wasp'}, {'id': 3819, 'synset': 'giant_hornet.n.01', 'name': 'giant_hornet'}, {'id': 3820, 'synset': 'common_wasp.n.01', 'name': 'common_wasp'}, {'id': 3821, 'synset': 'bald-faced_hornet.n.01', 'name': 'bald-faced_hornet'}, {'id': 3822, 'synset': 'yellow_jacket.n.02', 'name': 'yellow_jacket'}, {'id': 3823, 'synset': 'polistes_annularis.n.01', 'name': 'Polistes_annularis'}, {'id': 3824, 'synset': 'mason_wasp.n.02', 'name': 'mason_wasp'}, {'id': 3825, 'synset': 'potter_wasp.n.01', 'name': 'potter_wasp'}, {'id': 3826, 'synset': 'mutillidae.n.01', 'name': 'Mutillidae'}, {'id': 3827, 'synset': 'velvet_ant.n.01', 'name': 'velvet_ant'}, {'id': 3828, 'synset': 'sphecoid_wasp.n.01', 'name': 'sphecoid_wasp'}, {'id': 3829, 'synset': 'mason_wasp.n.01', 'name': 'mason_wasp'}, {'id': 3830, 'synset': 'digger_wasp.n.01', 'name': 'digger_wasp'}, {'id': 3831, 'synset': 'cicada_killer.n.01', 'name': 'cicada_killer'}, {'id': 3832, 'synset': 'mud_dauber.n.01', 'name': 'mud_dauber'}, {'id': 3833, 'synset': 'gall_wasp.n.01', 'name': 'gall_wasp'}, {'id': 3834, 'synset': 'chalcid_fly.n.01', 'name': 'chalcid_fly'}, {'id': 3835, 'synset': 'strawworm.n.02', 'name': 'strawworm'}, {'id': 3836, 'synset': 'chalcis_fly.n.01', 'name': 'chalcis_fly'}, {'id': 3837, 'synset': 'ichneumon_fly.n.01', 'name': 'ichneumon_fly'}, {'id': 3838, 'synset': 'sawfly.n.01', 'name': 'sawfly'}, {'id': 3839, 'synset': 'birch_leaf_miner.n.01', 'name': 'birch_leaf_miner'}, {'id': 3840, 'synset': 'ant.n.01', 'name': 'ant'}, {'id': 3841, 
'synset': 'pharaoh_ant.n.01', 'name': 'pharaoh_ant'}, {'id': 3842, 'synset': 'little_black_ant.n.01', 'name': 'little_black_ant'}, {'id': 3843, 'synset': 'army_ant.n.01', 'name': 'army_ant'}, {'id': 3844, 'synset': 'carpenter_ant.n.01', 'name': 'carpenter_ant'}, {'id': 3845, 'synset': 'fire_ant.n.01', 'name': 'fire_ant'}, {'id': 3846, 'synset': 'wood_ant.n.01', 'name': 'wood_ant'}, {'id': 3847, 'synset': 'slave_ant.n.01', 'name': 'slave_ant'}, {'id': 3848, 'synset': 'formica_fusca.n.01', 'name': 'Formica_fusca'}, {'id': 3849, 'synset': 'slave-making_ant.n.01', 'name': 'slave-making_ant'}, {'id': 3850, 'synset': 'sanguinary_ant.n.01', 'name': 'sanguinary_ant'}, {'id': 3851, 'synset': 'bulldog_ant.n.01', 'name': 'bulldog_ant'}, {'id': 3852, 'synset': 'amazon_ant.n.01', 'name': 'Amazon_ant'}, {'id': 3853, 'synset': 'termite.n.01', 'name': 'termite'}, {'id': 3854, 'synset': 'dry-wood_termite.n.01', 'name': 'dry-wood_termite'}, {'id': 3855, 'synset': 'reticulitermes_lucifugus.n.01', 'name': 'Reticulitermes_lucifugus'}, {'id': 3856, 'synset': 'mastotermes_darwiniensis.n.01', 'name': 'Mastotermes_darwiniensis'}, {'id': 3857, 'synset': 'mastotermes_electrodominicus.n.01', 'name': 'Mastotermes_electrodominicus'}, {'id': 3858, 'synset': 'powder-post_termite.n.01', 'name': 'powder-post_termite'}, {'id': 3859, 'synset': 'orthopterous_insect.n.01', 'name': 'orthopterous_insect'}, {'id': 3860, 'synset': 'grasshopper.n.01', 'name': 'grasshopper'}, {'id': 3861, 'synset': 'short-horned_grasshopper.n.01', 'name': 'short-horned_grasshopper'}, {'id': 3862, 'synset': 'locust.n.01', 'name': 'locust'}, {'id': 3863, 'synset': 'migratory_locust.n.01', 'name': 'migratory_locust'}, {'id': 3864, 'synset': 'migratory_grasshopper.n.01', 'name': 'migratory_grasshopper'}, {'id': 3865, 'synset': 'long-horned_grasshopper.n.01', 'name': 'long-horned_grasshopper'}, {'id': 3866, 'synset': 'katydid.n.01', 'name': 'katydid'}, {'id': 3867, 'synset': 'mormon_cricket.n.01', 'name': 'mormon_cricket'}, {'id': 3868, 'synset': 'sand_cricket.n.01', 'name': 'sand_cricket'}, {'id': 3869, 'synset': 'cricket.n.01', 'name': 'cricket'}, {'id': 3870, 'synset': 'mole_cricket.n.01', 'name': 'mole_cricket'}, {'id': 3871, 'synset': 'european_house_cricket.n.01', 'name': 'European_house_cricket'}, {'id': 3872, 'synset': 'field_cricket.n.01', 'name': 'field_cricket'}, {'id': 3873, 'synset': 'tree_cricket.n.01', 'name': 'tree_cricket'}, {'id': 3874, 'synset': 'snowy_tree_cricket.n.01', 'name': 'snowy_tree_cricket'}, {'id': 3875, 'synset': 'phasmid.n.01', 'name': 'phasmid'}, {'id': 3876, 'synset': 'walking_stick.n.02', 'name': 'walking_stick'}, {'id': 3877, 'synset': 'diapheromera.n.01', 'name': 'diapheromera'}, {'id': 3878, 'synset': 'walking_leaf.n.02', 'name': 'walking_leaf'}, {'id': 3879, 'synset': 'oriental_cockroach.n.01', 'name': 'oriental_cockroach'}, {'id': 3880, 'synset': 'american_cockroach.n.01', 'name': 'American_cockroach'}, {'id': 3881, 'synset': 'australian_cockroach.n.01', 'name': 'Australian_cockroach'}, {'id': 3882, 'synset': 'german_cockroach.n.01', 'name': 'German_cockroach'}, {'id': 3883, 'synset': 'giant_cockroach.n.01', 'name': 'giant_cockroach'}, {'id': 3884, 'synset': 'mantis.n.01', 'name': 'mantis'}, {'id': 3885, 'synset': 'praying_mantis.n.01', 'name': 'praying_mantis'}, {'id': 3886, 'synset': 'bug.n.01', 'name': 'bug'}, {'id': 3887, 'synset': 'hemipterous_insect.n.01', 'name': 'hemipterous_insect'}, {'id': 3888, 'synset': 'leaf_bug.n.01', 'name': 'leaf_bug'}, {'id': 3889, 'synset': 'mirid_bug.n.01', 'name': 'mirid_bug'}, 
{'id': 3890, 'synset': 'four-lined_plant_bug.n.01', 'name': 'four-lined_plant_bug'}, {'id': 3891, 'synset': 'lygus_bug.n.01', 'name': 'lygus_bug'}, {'id': 3892, 'synset': 'tarnished_plant_bug.n.01', 'name': 'tarnished_plant_bug'}, {'id': 3893, 'synset': 'lace_bug.n.01', 'name': 'lace_bug'}, {'id': 3894, 'synset': 'lygaeid.n.01', 'name': 'lygaeid'}, {'id': 3895, 'synset': 'chinch_bug.n.01', 'name': 'chinch_bug'}, {'id': 3896, 'synset': 'coreid_bug.n.01', 'name': 'coreid_bug'}, {'id': 3897, 'synset': 'squash_bug.n.01', 'name': 'squash_bug'}, {'id': 3898, 'synset': 'leaf-footed_bug.n.01', 'name': 'leaf-footed_bug'}, {'id': 3899, 'synset': 'bedbug.n.01', 'name': 'bedbug'}, {'id': 3900, 'synset': 'backswimmer.n.01', 'name': 'backswimmer'}, {'id': 3901, 'synset': 'true_bug.n.01', 'name': 'true_bug'}, {'id': 3902, 'synset': 'heteropterous_insect.n.01', 'name': 'heteropterous_insect'}, {'id': 3903, 'synset': 'water_bug.n.01', 'name': 'water_bug'}, {'id': 3904, 'synset': 'giant_water_bug.n.01', 'name': 'giant_water_bug'}, {'id': 3905, 'synset': 'water_scorpion.n.01', 'name': 'water_scorpion'}, {'id': 3906, 'synset': 'water_boatman.n.01', 'name': 'water_boatman'}, {'id': 3907, 'synset': 'water_strider.n.01', 'name': 'water_strider'}, {'id': 3908, 'synset': 'common_pond-skater.n.01', 'name': 'common_pond-skater'}, {'id': 3909, 'synset': 'assassin_bug.n.01', 'name': 'assassin_bug'}, {'id': 3910, 'synset': 'conenose.n.01', 'name': 'conenose'}, {'id': 3911, 'synset': 'wheel_bug.n.01', 'name': 'wheel_bug'}, {'id': 3912, 'synset': 'firebug.n.02', 'name': 'firebug'}, {'id': 3913, 'synset': 'cotton_stainer.n.01', 'name': 'cotton_stainer'}, {'id': 3914, 'synset': 'homopterous_insect.n.01', 'name': 'homopterous_insect'}, {'id': 3915, 'synset': 'whitefly.n.01', 'name': 'whitefly'}, {'id': 3916, 'synset': 'citrus_whitefly.n.01', 'name': 'citrus_whitefly'}, {'id': 3917, 'synset': 'greenhouse_whitefly.n.01', 'name': 'greenhouse_whitefly'}, {'id': 3918, 'synset': 'sweet-potato_whitefly.n.01', 'name': 'sweet-potato_whitefly'}, {'id': 3919, 'synset': 'superbug.n.02', 'name': 'superbug'}, {'id': 3920, 'synset': 'cotton_strain.n.01', 'name': 'cotton_strain'}, {'id': 3921, 'synset': 'coccid_insect.n.01', 'name': 'coccid_insect'}, {'id': 3922, 'synset': 'scale_insect.n.01', 'name': 'scale_insect'}, {'id': 3923, 'synset': 'soft_scale.n.01', 'name': 'soft_scale'}, {'id': 3924, 'synset': 'brown_soft_scale.n.01', 'name': 'brown_soft_scale'}, {'id': 3925, 'synset': 'armored_scale.n.01', 'name': 'armored_scale'}, {'id': 3926, 'synset': 'san_jose_scale.n.01', 'name': 'San_Jose_scale'}, {'id': 3927, 'synset': 'cochineal_insect.n.01', 'name': 'cochineal_insect'}, {'id': 3928, 'synset': 'mealybug.n.01', 'name': 'mealybug'}, {'id': 3929, 'synset': 'citrophilous_mealybug.n.01', 'name': 'citrophilous_mealybug'}, {'id': 3930, 'synset': 'comstock_mealybug.n.01', 'name': 'Comstock_mealybug'}, {'id': 3931, 'synset': 'citrus_mealybug.n.01', 'name': 'citrus_mealybug'}, {'id': 3932, 'synset': 'plant_louse.n.01', 'name': 'plant_louse'}, {'id': 3933, 'synset': 'aphid.n.01', 'name': 'aphid'}, {'id': 3934, 'synset': 'apple_aphid.n.01', 'name': 'apple_aphid'}, {'id': 3935, 'synset': 'blackfly.n.01', 'name': 'blackfly'}, {'id': 3936, 'synset': 'greenfly.n.01', 'name': 'greenfly'}, {'id': 3937, 'synset': 'green_peach_aphid.n.01', 'name': 'green_peach_aphid'}, {'id': 3938, 'synset': 'ant_cow.n.01', 'name': 'ant_cow'}, {'id': 3939, 'synset': 'woolly_aphid.n.01', 'name': 'woolly_aphid'}, {'id': 3940, 'synset': 'woolly_apple_aphid.n.01', 'name': 
'woolly_apple_aphid'}, {'id': 3941, 'synset': 'woolly_alder_aphid.n.01', 'name': 'woolly_alder_aphid'}, {'id': 3942, 'synset': 'adelgid.n.01', 'name': 'adelgid'}, {'id': 3943, 'synset': 'balsam_woolly_aphid.n.01', 'name': 'balsam_woolly_aphid'}, {'id': 3944, 'synset': 'spruce_gall_aphid.n.01', 'name': 'spruce_gall_aphid'}, {'id': 3945, 'synset': 'woolly_adelgid.n.01', 'name': 'woolly_adelgid'}, {'id': 3946, 'synset': 'jumping_plant_louse.n.01', 'name': 'jumping_plant_louse'}, {'id': 3947, 'synset': 'cicada.n.01', 'name': 'cicada'}, {'id': 3948, 'synset': 'dog-day_cicada.n.01', 'name': 'dog-day_cicada'}, {'id': 3949, 'synset': 'seventeen-year_locust.n.01', 'name': 'seventeen-year_locust'}, {'id': 3950, 'synset': 'spittle_insect.n.01', 'name': 'spittle_insect'}, {'id': 3951, 'synset': 'froghopper.n.01', 'name': 'froghopper'}, {'id': 3952, 'synset': 'meadow_spittlebug.n.01', 'name': 'meadow_spittlebug'}, {'id': 3953, 'synset': 'pine_spittlebug.n.01', 'name': 'pine_spittlebug'}, {'id': 3954, 'synset': 'saratoga_spittlebug.n.01', 'name': 'Saratoga_spittlebug'}, {'id': 3955, 'synset': 'leafhopper.n.01', 'name': 'leafhopper'}, {'id': 3956, 'synset': 'plant_hopper.n.01', 'name': 'plant_hopper'}, {'id': 3957, 'synset': 'treehopper.n.01', 'name': 'treehopper'}, {'id': 3958, 'synset': 'lantern_fly.n.01', 'name': 'lantern_fly'}, {'id': 3959, 'synset': 'psocopterous_insect.n.01', 'name': 'psocopterous_insect'}, {'id': 3960, 'synset': 'psocid.n.01', 'name': 'psocid'}, {'id': 3961, 'synset': 'bark-louse.n.01', 'name': 'bark-louse'}, {'id': 3962, 'synset': 'booklouse.n.01', 'name': 'booklouse'}, {'id': 3963, 'synset': 'common_booklouse.n.01', 'name': 'common_booklouse'}, {'id': 3964, 'synset': 'ephemerid.n.01', 'name': 'ephemerid'}, {'id': 3965, 'synset': 'mayfly.n.01', 'name': 'mayfly'}, {'id': 3966, 'synset': 'stonefly.n.01', 'name': 'stonefly'}, {'id': 3967, 'synset': 'neuropteron.n.01', 'name': 'neuropteron'}, {'id': 3968, 'synset': 'ant_lion.n.02', 'name': 'ant_lion'}, {'id': 3969, 'synset': 'doodlebug.n.03', 'name': 'doodlebug'}, {'id': 3970, 'synset': 'lacewing.n.01', 'name': 'lacewing'}, {'id': 3971, 'synset': 'aphid_lion.n.01', 'name': 'aphid_lion'}, {'id': 3972, 'synset': 'green_lacewing.n.01', 'name': 'green_lacewing'}, {'id': 3973, 'synset': 'brown_lacewing.n.01', 'name': 'brown_lacewing'}, {'id': 3974, 'synset': 'dobson.n.02', 'name': 'dobson'}, {'id': 3975, 'synset': 'hellgrammiate.n.01', 'name': 'hellgrammiate'}, {'id': 3976, 'synset': 'fish_fly.n.01', 'name': 'fish_fly'}, {'id': 3977, 'synset': 'alderfly.n.01', 'name': 'alderfly'}, {'id': 3978, 'synset': 'snakefly.n.01', 'name': 'snakefly'}, {'id': 3979, 'synset': 'mantispid.n.01', 'name': 'mantispid'}, {'id': 3980, 'synset': 'odonate.n.01', 'name': 'odonate'}, {'id': 3981, 'synset': 'damselfly.n.01', 'name': 'damselfly'}, {'id': 3982, 'synset': 'trichopterous_insect.n.01', 'name': 'trichopterous_insect'}, {'id': 3983, 'synset': 'caddis_fly.n.01', 'name': 'caddis_fly'}, {'id': 3984, 'synset': 'caseworm.n.01', 'name': 'caseworm'}, {'id': 3985, 'synset': 'caddisworm.n.01', 'name': 'caddisworm'}, {'id': 3986, 'synset': 'thysanuran_insect.n.01', 'name': 'thysanuran_insect'}, {'id': 3987, 'synset': 'bristletail.n.01', 'name': 'bristletail'}, {'id': 3988, 'synset': 'silverfish.n.01', 'name': 'silverfish'}, {'id': 3989, 'synset': 'firebrat.n.01', 'name': 'firebrat'}, {'id': 3990, 'synset': 'jumping_bristletail.n.01', 'name': 'jumping_bristletail'}, {'id': 3991, 'synset': 'thysanopter.n.01', 'name': 'thysanopter'}, {'id': 3992, 'synset': 
'thrips.n.01', 'name': 'thrips'}, {'id': 3993, 'synset': 'tobacco_thrips.n.01', 'name': 'tobacco_thrips'}, {'id': 3994, 'synset': 'onion_thrips.n.01', 'name': 'onion_thrips'}, {'id': 3995, 'synset': 'earwig.n.01', 'name': 'earwig'}, {'id': 3996, 'synset': 'common_european_earwig.n.01', 'name': 'common_European_earwig'}, {'id': 3997, 'synset': 'lepidopterous_insect.n.01', 'name': 'lepidopterous_insect'}, {'id': 3998, 'synset': 'nymphalid.n.01', 'name': 'nymphalid'}, {'id': 3999, 'synset': 'mourning_cloak.n.01', 'name': 'mourning_cloak'}, {'id': 4000, 'synset': 'tortoiseshell.n.02', 'name': 'tortoiseshell'}, {'id': 4001, 'synset': 'painted_beauty.n.01', 'name': 'painted_beauty'}, {'id': 4002, 'synset': 'admiral.n.02', 'name': 'admiral'}, {'id': 4003, 'synset': 'red_admiral.n.01', 'name': 'red_admiral'}, {'id': 4004, 'synset': 'white_admiral.n.02', 'name': 'white_admiral'}, {'id': 4005, 'synset': 'banded_purple.n.01', 'name': 'banded_purple'}, {'id': 4006, 'synset': 'red-spotted_purple.n.01', 'name': 'red-spotted_purple'}, {'id': 4007, 'synset': 'viceroy.n.02', 'name': 'viceroy'}, {'id': 4008, 'synset': 'anglewing.n.01', 'name': 'anglewing'}, {'id': 4009, 'synset': 'ringlet.n.04', 'name': 'ringlet'}, {'id': 4010, 'synset': 'comma.n.02', 'name': 'comma'}, {'id': 4011, 'synset': 'fritillary.n.02', 'name': 'fritillary'}, {'id': 4012, 'synset': 'silverspot.n.01', 'name': 'silverspot'}, {'id': 4013, 'synset': 'emperor_butterfly.n.01', 'name': 'emperor_butterfly'}, {'id': 4014, 'synset': 'purple_emperor.n.01', 'name': 'purple_emperor'}, {'id': 4015, 'synset': 'peacock.n.01', 'name': 'peacock'}, {'id': 4016, 'synset': 'danaid.n.01', 'name': 'danaid'}, {'id': 4017, 'synset': 'monarch.n.02', 'name': 'monarch'}, {'id': 4018, 'synset': 'pierid.n.01', 'name': 'pierid'}, {'id': 4019, 'synset': 'cabbage_butterfly.n.01', 'name': 'cabbage_butterfly'}, {'id': 4020, 'synset': 'small_white.n.01', 'name': 'small_white'}, {'id': 4021, 'synset': 'large_white.n.01', 'name': 'large_white'}, {'id': 4022, 'synset': 'southern_cabbage_butterfly.n.01', 'name': 'southern_cabbage_butterfly'}, {'id': 4023, 'synset': 'sulphur_butterfly.n.01', 'name': 'sulphur_butterfly'}, {'id': 4024, 'synset': 'lycaenid.n.01', 'name': 'lycaenid'}, {'id': 4025, 'synset': 'blue.n.07', 'name': 'blue'}, {'id': 4026, 'synset': 'copper.n.05', 'name': 'copper'}, {'id': 4027, 'synset': 'american_copper.n.01', 'name': 'American_copper'}, {'id': 4028, 'synset': 'hairstreak.n.01', 'name': 'hairstreak'}, {'id': 4029, 'synset': 'strymon_melinus.n.01', 'name': 'Strymon_melinus'}, {'id': 4030, 'synset': 'moth.n.01', 'name': 'moth'}, {'id': 4031, 'synset': 'moth_miller.n.01', 'name': 'moth_miller'}, {'id': 4032, 'synset': 'tortricid.n.01', 'name': 'tortricid'}, {'id': 4033, 'synset': 'leaf_roller.n.01', 'name': 'leaf_roller'}, {'id': 4034, 'synset': 'tea_tortrix.n.01', 'name': 'tea_tortrix'}, {'id': 4035, 'synset': 'orange_tortrix.n.01', 'name': 'orange_tortrix'}, {'id': 4036, 'synset': 'codling_moth.n.01', 'name': 'codling_moth'}, {'id': 4037, 'synset': 'lymantriid.n.01', 'name': 'lymantriid'}, {'id': 4038, 'synset': 'tussock_caterpillar.n.01', 'name': 'tussock_caterpillar'}, {'id': 4039, 'synset': 'gypsy_moth.n.01', 'name': 'gypsy_moth'}, {'id': 4040, 'synset': 'browntail.n.01', 'name': 'browntail'}, {'id': 4041, 'synset': 'gold-tail_moth.n.01', 'name': 'gold-tail_moth'}, {'id': 4042, 'synset': 'geometrid.n.01', 'name': 'geometrid'}, {'id': 4043, 'synset': 'paleacrita_vernata.n.01', 'name': 'Paleacrita_vernata'}, {'id': 4044, 'synset': 
'alsophila_pometaria.n.01', 'name': 'Alsophila_pometaria'}, {'id': 4045, 'synset': 'cankerworm.n.01', 'name': 'cankerworm'}, {'id': 4046, 'synset': 'spring_cankerworm.n.01', 'name': 'spring_cankerworm'}, {'id': 4047, 'synset': 'fall_cankerworm.n.01', 'name': 'fall_cankerworm'}, {'id': 4048, 'synset': 'measuring_worm.n.01', 'name': 'measuring_worm'}, {'id': 4049, 'synset': 'pyralid.n.01', 'name': 'pyralid'}, {'id': 4050, 'synset': 'bee_moth.n.01', 'name': 'bee_moth'}, {'id': 4051, 'synset': 'corn_borer.n.02', 'name': 'corn_borer'}, {'id': 4052, 'synset': 'mediterranean_flour_moth.n.01', 'name': 'Mediterranean_flour_moth'}, {'id': 4053, 'synset': 'tobacco_moth.n.01', 'name': 'tobacco_moth'}, {'id': 4054, 'synset': 'almond_moth.n.01', 'name': 'almond_moth'}, {'id': 4055, 'synset': 'raisin_moth.n.01', 'name': 'raisin_moth'}, {'id': 4056, 'synset': 'tineoid.n.01', 'name': 'tineoid'}, {'id': 4057, 'synset': 'tineid.n.01', 'name': 'tineid'}, {'id': 4058, 'synset': 'clothes_moth.n.01', 'name': 'clothes_moth'}, {'id': 4059, 'synset': 'casemaking_clothes_moth.n.01', 'name': 'casemaking_clothes_moth'}, {'id': 4060, 'synset': 'webbing_clothes_moth.n.01', 'name': 'webbing_clothes_moth'}, {'id': 4061, 'synset': 'carpet_moth.n.01', 'name': 'carpet_moth'}, {'id': 4062, 'synset': 'gelechiid.n.01', 'name': 'gelechiid'}, {'id': 4063, 'synset': 'grain_moth.n.01', 'name': 'grain_moth'}, {'id': 4064, 'synset': 'angoumois_moth.n.01', 'name': 'angoumois_moth'}, {'id': 4065, 'synset': 'potato_moth.n.01', 'name': 'potato_moth'}, {'id': 4066, 'synset': 'potato_tuberworm.n.01', 'name': 'potato_tuberworm'}, {'id': 4067, 'synset': 'noctuid_moth.n.01', 'name': 'noctuid_moth'}, {'id': 4068, 'synset': 'cutworm.n.01', 'name': 'cutworm'}, {'id': 4069, 'synset': 'underwing.n.01', 'name': 'underwing'}, {'id': 4070, 'synset': 'red_underwing.n.01', 'name': 'red_underwing'}, {'id': 4071, 'synset': 'antler_moth.n.01', 'name': 'antler_moth'}, {'id': 4072, 'synset': 'heliothis_moth.n.01', 'name': 'heliothis_moth'}, {'id': 4073, 'synset': 'army_cutworm.n.01', 'name': 'army_cutworm'}, {'id': 4074, 'synset': 'armyworm.n.02', 'name': 'armyworm'}, {'id': 4075, 'synset': 'armyworm.n.01', 'name': 'armyworm'}, {'id': 4076, 'synset': 'spodoptera_exigua.n.02', 'name': 'Spodoptera_exigua'}, {'id': 4077, 'synset': 'beet_armyworm.n.01', 'name': 'beet_armyworm'}, {'id': 4078, 'synset': 'spodoptera_frugiperda.n.02', 'name': 'Spodoptera_frugiperda'}, {'id': 4079, 'synset': 'fall_armyworm.n.01', 'name': 'fall_armyworm'}, {'id': 4080, 'synset': 'hawkmoth.n.01', 'name': 'hawkmoth'}, {'id': 4081, 'synset': 'manduca_sexta.n.02', 'name': 'Manduca_sexta'}, {'id': 4082, 'synset': 'tobacco_hornworm.n.01', 'name': 'tobacco_hornworm'}, {'id': 4083, 'synset': 'manduca_quinquemaculata.n.02', 'name': 'Manduca_quinquemaculata'}, {'id': 4084, 'synset': 'tomato_hornworm.n.01', 'name': 'tomato_hornworm'}, {'id': 4085, 'synset': "death's-head_moth.n.01", 'name': "death's-head_moth"}, {'id': 4086, 'synset': 'bombycid.n.01', 'name': 'bombycid'}, {'id': 4087, 'synset': 'domestic_silkworm_moth.n.01', 'name': 'domestic_silkworm_moth'}, {'id': 4088, 'synset': 'silkworm.n.01', 'name': 'silkworm'}, {'id': 4089, 'synset': 'saturniid.n.01', 'name': 'saturniid'}, {'id': 4090, 'synset': 'emperor.n.03', 'name': 'emperor'}, {'id': 4091, 'synset': 'imperial_moth.n.01', 'name': 'imperial_moth'}, {'id': 4092, 'synset': 'giant_silkworm_moth.n.01', 'name': 'giant_silkworm_moth'}, {'id': 4093, 'synset': 'silkworm.n.02', 'name': 'silkworm'}, {'id': 4094, 'synset': 'luna_moth.n.01', 
'name': 'luna_moth'}, {'id': 4095, 'synset': 'cecropia.n.02', 'name': 'cecropia'}, {'id': 4096, 'synset': 'cynthia_moth.n.01', 'name': 'cynthia_moth'}, {'id': 4097, 'synset': 'ailanthus_silkworm.n.01', 'name': 'ailanthus_silkworm'}, {'id': 4098, 'synset': 'io_moth.n.01', 'name': 'io_moth'}, {'id': 4099, 'synset': 'polyphemus_moth.n.01', 'name': 'polyphemus_moth'}, {'id': 4100, 'synset': 'pernyi_moth.n.01', 'name': 'pernyi_moth'}, {'id': 4101, 'synset': 'tussah.n.01', 'name': 'tussah'}, {'id': 4102, 'synset': 'atlas_moth.n.01', 'name': 'atlas_moth'}, {'id': 4103, 'synset': 'arctiid.n.01', 'name': 'arctiid'}, {'id': 4104, 'synset': 'tiger_moth.n.01', 'name': 'tiger_moth'}, {'id': 4105, 'synset': 'cinnabar.n.02', 'name': 'cinnabar'}, {'id': 4106, 'synset': 'lasiocampid.n.01', 'name': 'lasiocampid'}, {'id': 4107, 'synset': 'eggar.n.01', 'name': 'eggar'}, {'id': 4108, 'synset': 'tent-caterpillar_moth.n.02', 'name': 'tent-caterpillar_moth'}, {'id': 4109, 'synset': 'tent_caterpillar.n.01', 'name': 'tent_caterpillar'}, {'id': 4110, 'synset': 'tent-caterpillar_moth.n.01', 'name': 'tent-caterpillar_moth'}, {'id': 4111, 'synset': 'forest_tent_caterpillar.n.01', 'name': 'forest_tent_caterpillar'}, {'id': 4112, 'synset': 'lappet.n.03', 'name': 'lappet'}, {'id': 4113, 'synset': 'lappet_caterpillar.n.01', 'name': 'lappet_caterpillar'}, {'id': 4114, 'synset': 'webworm.n.01', 'name': 'webworm'}, {'id': 4115, 'synset': 'webworm_moth.n.01', 'name': 'webworm_moth'}, {'id': 4116, 'synset': 'hyphantria_cunea.n.02', 'name': 'Hyphantria_cunea'}, {'id': 4117, 'synset': 'fall_webworm.n.01', 'name': 'fall_webworm'}, {'id': 4118, 'synset': 'garden_webworm.n.01', 'name': 'garden_webworm'}, {'id': 4119, 'synset': 'instar.n.01', 'name': 'instar'}, {'id': 4120, 'synset': 'caterpillar.n.01', 'name': 'caterpillar'}, {'id': 4121, 'synset': 'corn_borer.n.01', 'name': 'corn_borer'}, {'id': 4122, 'synset': 'bollworm.n.01', 'name': 'bollworm'}, {'id': 4123, 'synset': 'pink_bollworm.n.01', 'name': 'pink_bollworm'}, {'id': 4124, 'synset': 'corn_earworm.n.01', 'name': 'corn_earworm'}, {'id': 4125, 'synset': 'cabbageworm.n.01', 'name': 'cabbageworm'}, {'id': 4126, 'synset': 'woolly_bear.n.01', 'name': 'woolly_bear'}, {'id': 4127, 'synset': 'woolly_bear_moth.n.01', 'name': 'woolly_bear_moth'}, {'id': 4128, 'synset': 'larva.n.01', 'name': 'larva'}, {'id': 4129, 'synset': 'nymph.n.02', 'name': 'nymph'}, {'id': 4130, 'synset': 'leptocephalus.n.01', 'name': 'leptocephalus'}, {'id': 4131, 'synset': 'grub.n.02', 'name': 'grub'}, {'id': 4132, 'synset': 'maggot.n.01', 'name': 'maggot'}, {'id': 4133, 'synset': 'leatherjacket.n.03', 'name': 'leatherjacket'}, {'id': 4134, 'synset': 'pupa.n.01', 'name': 'pupa'}, {'id': 4135, 'synset': 'chrysalis.n.01', 'name': 'chrysalis'}, {'id': 4136, 'synset': 'imago.n.02', 'name': 'imago'}, {'id': 4137, 'synset': 'queen.n.01', 'name': 'queen'}, {'id': 4138, 'synset': 'phoronid.n.01', 'name': 'phoronid'}, {'id': 4139, 'synset': 'bryozoan.n.01', 'name': 'bryozoan'}, {'id': 4140, 'synset': 'brachiopod.n.01', 'name': 'brachiopod'}, {'id': 4141, 'synset': 'peanut_worm.n.01', 'name': 'peanut_worm'}, {'id': 4142, 'synset': 'echinoderm.n.01', 'name': 'echinoderm'}, {'id': 4143, 'synset': 'brittle_star.n.01', 'name': 'brittle_star'}, {'id': 4144, 'synset': 'basket_star.n.01', 'name': 'basket_star'}, {'id': 4145, 'synset': 'astrophyton_muricatum.n.01', 'name': 'Astrophyton_muricatum'}, {'id': 4146, 'synset': 'sea_urchin.n.01', 'name': 'sea_urchin'}, {'id': 4147, 'synset': 'edible_sea_urchin.n.01', 'name': 
'edible_sea_urchin'}, {'id': 4148, 'synset': 'sand_dollar.n.01', 'name': 'sand_dollar'}, {'id': 4149, 'synset': 'heart_urchin.n.01', 'name': 'heart_urchin'}, {'id': 4150, 'synset': 'crinoid.n.01', 'name': 'crinoid'}, {'id': 4151, 'synset': 'sea_lily.n.01', 'name': 'sea_lily'}, {'id': 4152, 'synset': 'feather_star.n.01', 'name': 'feather_star'}, {'id': 4153, 'synset': 'sea_cucumber.n.01', 'name': 'sea_cucumber'}, {'id': 4154, 'synset': 'trepang.n.01', 'name': 'trepang'}, {'id': 4155, 'synset': 'duplicidentata.n.01', 'name': 'Duplicidentata'}, {'id': 4156, 'synset': 'lagomorph.n.01', 'name': 'lagomorph'}, {'id': 4157, 'synset': 'leporid.n.01', 'name': 'leporid'}, {'id': 4158, 'synset': 'rabbit_ears.n.02', 'name': 'rabbit_ears'}, {'id': 4159, 'synset': 'lapin.n.02', 'name': 'lapin'}, {'id': 4160, 'synset': 'bunny.n.02', 'name': 'bunny'}, {'id': 4161, 'synset': 'european_rabbit.n.01', 'name': 'European_rabbit'}, {'id': 4162, 'synset': 'wood_rabbit.n.01', 'name': 'wood_rabbit'}, {'id': 4163, 'synset': 'eastern_cottontail.n.01', 'name': 'eastern_cottontail'}, {'id': 4164, 'synset': 'swamp_rabbit.n.02', 'name': 'swamp_rabbit'}, {'id': 4165, 'synset': 'marsh_hare.n.01', 'name': 'marsh_hare'}, {'id': 4166, 'synset': 'hare.n.01', 'name': 'hare'}, {'id': 4167, 'synset': 'leveret.n.01', 'name': 'leveret'}, {'id': 4168, 'synset': 'european_hare.n.01', 'name': 'European_hare'}, {'id': 4169, 'synset': 'jackrabbit.n.01', 'name': 'jackrabbit'}, {'id': 4170, 'synset': 'white-tailed_jackrabbit.n.01', 'name': 'white-tailed_jackrabbit'}, {'id': 4171, 'synset': 'blacktail_jackrabbit.n.01', 'name': 'blacktail_jackrabbit'}, {'id': 4172, 'synset': 'polar_hare.n.01', 'name': 'polar_hare'}, {'id': 4173, 'synset': 'snowshoe_hare.n.01', 'name': 'snowshoe_hare'}, {'id': 4174, 'synset': 'belgian_hare.n.01', 'name': 'Belgian_hare'}, {'id': 4175, 'synset': 'angora.n.03', 'name': 'Angora'}, {'id': 4176, 'synset': 'pika.n.01', 'name': 'pika'}, {'id': 4177, 'synset': 'little_chief_hare.n.01', 'name': 'little_chief_hare'}, {'id': 4178, 'synset': 'collared_pika.n.01', 'name': 'collared_pika'}, {'id': 4179, 'synset': 'mouse.n.01', 'name': 'mouse'}, {'id': 4180, 'synset': 'pocket_rat.n.01', 'name': 'pocket_rat'}, {'id': 4181, 'synset': 'murine.n.01', 'name': 'murine'}, {'id': 4182, 'synset': 'house_mouse.n.01', 'name': 'house_mouse'}, {'id': 4183, 'synset': 'harvest_mouse.n.02', 'name': 'harvest_mouse'}, {'id': 4184, 'synset': 'field_mouse.n.02', 'name': 'field_mouse'}, {'id': 4185, 'synset': 'nude_mouse.n.01', 'name': 'nude_mouse'}, {'id': 4186, 'synset': 'european_wood_mouse.n.01', 'name': 'European_wood_mouse'}, {'id': 4187, 'synset': 'brown_rat.n.01', 'name': 'brown_rat'}, {'id': 4188, 'synset': 'wharf_rat.n.02', 'name': 'wharf_rat'}, {'id': 4189, 'synset': 'sewer_rat.n.01', 'name': 'sewer_rat'}, {'id': 4190, 'synset': 'black_rat.n.01', 'name': 'black_rat'}, {'id': 4191, 'synset': 'bandicoot_rat.n.01', 'name': 'bandicoot_rat'}, {'id': 4192, 'synset': 'jerboa_rat.n.01', 'name': 'jerboa_rat'}, {'id': 4193, 'synset': 'kangaroo_mouse.n.02', 'name': 'kangaroo_mouse'}, {'id': 4194, 'synset': 'water_rat.n.03', 'name': 'water_rat'}, {'id': 4195, 'synset': 'beaver_rat.n.01', 'name': 'beaver_rat'}, {'id': 4196, 'synset': 'new_world_mouse.n.01', 'name': 'New_World_mouse'}, {'id': 4197, 'synset': 'american_harvest_mouse.n.01', 'name': 'American_harvest_mouse'}, {'id': 4198, 'synset': 'wood_mouse.n.01', 'name': 'wood_mouse'}, {'id': 4199, 'synset': 'white-footed_mouse.n.01', 'name': 'white-footed_mouse'}, {'id': 4200, 'synset': 
'deer_mouse.n.01', 'name': 'deer_mouse'}, {'id': 4201, 'synset': 'cactus_mouse.n.01', 'name': 'cactus_mouse'}, {'id': 4202, 'synset': 'cotton_mouse.n.01', 'name': 'cotton_mouse'}, {'id': 4203, 'synset': 'pygmy_mouse.n.01', 'name': 'pygmy_mouse'}, {'id': 4204, 'synset': 'grasshopper_mouse.n.01', 'name': 'grasshopper_mouse'}, {'id': 4205, 'synset': 'muskrat.n.02', 'name': 'muskrat'}, {'id': 4206, 'synset': 'round-tailed_muskrat.n.01', 'name': 'round-tailed_muskrat'}, {'id': 4207, 'synset': 'cotton_rat.n.01', 'name': 'cotton_rat'}, {'id': 4208, 'synset': 'wood_rat.n.01', 'name': 'wood_rat'}, {'id': 4209, 'synset': 'dusky-footed_wood_rat.n.01', 'name': 'dusky-footed_wood_rat'}, {'id': 4210, 'synset': 'vole.n.01', 'name': 'vole'}, {'id': 4211, 'synset': 'packrat.n.02', 'name': 'packrat'}, {'id': 4212, 'synset': 'dusky-footed_woodrat.n.01', 'name': 'dusky-footed_woodrat'}, {'id': 4213, 'synset': 'eastern_woodrat.n.01', 'name': 'eastern_woodrat'}, {'id': 4214, 'synset': 'rice_rat.n.01', 'name': 'rice_rat'}, {'id': 4215, 'synset': 'pine_vole.n.01', 'name': 'pine_vole'}, {'id': 4216, 'synset': 'meadow_vole.n.01', 'name': 'meadow_vole'}, {'id': 4217, 'synset': 'water_vole.n.02', 'name': 'water_vole'}, {'id': 4218, 'synset': 'prairie_vole.n.01', 'name': 'prairie_vole'}, {'id': 4219, 'synset': 'water_vole.n.01', 'name': 'water_vole'}, {'id': 4220, 'synset': 'red-backed_mouse.n.01', 'name': 'red-backed_mouse'}, {'id': 4221, 'synset': 'phenacomys.n.01', 'name': 'phenacomys'}, {'id': 4222, 'synset': 'eurasian_hamster.n.01', 'name': 'Eurasian_hamster'}, {'id': 4223, 'synset': 'golden_hamster.n.01', 'name': 'golden_hamster'}, {'id': 4224, 'synset': 'gerbil.n.01', 'name': 'gerbil'}, {'id': 4225, 'synset': 'jird.n.01', 'name': 'jird'}, {'id': 4226, 'synset': 'tamarisk_gerbil.n.01', 'name': 'tamarisk_gerbil'}, {'id': 4227, 'synset': 'sand_rat.n.02', 'name': 'sand_rat'}, {'id': 4228, 'synset': 'lemming.n.01', 'name': 'lemming'}, {'id': 4229, 'synset': 'european_lemming.n.01', 'name': 'European_lemming'}, {'id': 4230, 'synset': 'brown_lemming.n.01', 'name': 'brown_lemming'}, {'id': 4231, 'synset': 'grey_lemming.n.01', 'name': 'grey_lemming'}, {'id': 4232, 'synset': 'pied_lemming.n.01', 'name': 'pied_lemming'}, {'id': 4233, 'synset': 'hudson_bay_collared_lemming.n.01', 'name': 'Hudson_bay_collared_lemming'}, {'id': 4234, 'synset': 'southern_bog_lemming.n.01', 'name': 'southern_bog_lemming'}, {'id': 4235, 'synset': 'northern_bog_lemming.n.01', 'name': 'northern_bog_lemming'}, {'id': 4236, 'synset': 'porcupine.n.01', 'name': 'porcupine'}, {'id': 4237, 'synset': 'old_world_porcupine.n.01', 'name': 'Old_World_porcupine'}, {'id': 4238, 'synset': 'brush-tailed_porcupine.n.01', 'name': 'brush-tailed_porcupine'}, {'id': 4239, 'synset': 'long-tailed_porcupine.n.01', 'name': 'long-tailed_porcupine'}, {'id': 4240, 'synset': 'new_world_porcupine.n.01', 'name': 'New_World_porcupine'}, {'id': 4241, 'synset': 'canada_porcupine.n.01', 'name': 'Canada_porcupine'}, {'id': 4242, 'synset': 'pocket_mouse.n.01', 'name': 'pocket_mouse'}, {'id': 4243, 'synset': 'silky_pocket_mouse.n.01', 'name': 'silky_pocket_mouse'}, {'id': 4244, 'synset': 'plains_pocket_mouse.n.01', 'name': 'plains_pocket_mouse'}, {'id': 4245, 'synset': 'hispid_pocket_mouse.n.01', 'name': 'hispid_pocket_mouse'}, {'id': 4246, 'synset': 'mexican_pocket_mouse.n.01', 'name': 'Mexican_pocket_mouse'}, {'id': 4247, 'synset': 'kangaroo_rat.n.01', 'name': 'kangaroo_rat'}, {'id': 4248, 'synset': 'ord_kangaroo_rat.n.01', 'name': 'Ord_kangaroo_rat'}, {'id': 4249, 'synset': 
'kangaroo_mouse.n.01', 'name': 'kangaroo_mouse'}, {'id': 4250, 'synset': 'jumping_mouse.n.01', 'name': 'jumping_mouse'}, {'id': 4251, 'synset': 'meadow_jumping_mouse.n.01', 'name': 'meadow_jumping_mouse'}, {'id': 4252, 'synset': 'jerboa.n.01', 'name': 'jerboa'}, {'id': 4253, 'synset': 'typical_jerboa.n.01', 'name': 'typical_jerboa'}, {'id': 4254, 'synset': 'jaculus_jaculus.n.01', 'name': 'Jaculus_jaculus'}, {'id': 4255, 'synset': 'dormouse.n.01', 'name': 'dormouse'}, {'id': 4256, 'synset': 'loir.n.01', 'name': 'loir'}, {'id': 4257, 'synset': 'hazel_mouse.n.01', 'name': 'hazel_mouse'}, {'id': 4258, 'synset': 'lerot.n.01', 'name': 'lerot'}, {'id': 4259, 'synset': 'gopher.n.04', 'name': 'gopher'}, {'id': 4260, 'synset': 'plains_pocket_gopher.n.01', 'name': 'plains_pocket_gopher'}, {'id': 4261, 'synset': 'southeastern_pocket_gopher.n.01', 'name': 'southeastern_pocket_gopher'}, {'id': 4262, 'synset': 'valley_pocket_gopher.n.01', 'name': 'valley_pocket_gopher'}, {'id': 4263, 'synset': 'northern_pocket_gopher.n.01', 'name': 'northern_pocket_gopher'}, {'id': 4264, 'synset': 'tree_squirrel.n.01', 'name': 'tree_squirrel'}, {'id': 4265, 'synset': 'eastern_grey_squirrel.n.01', 'name': 'eastern_grey_squirrel'}, {'id': 4266, 'synset': 'western_grey_squirrel.n.01', 'name': 'western_grey_squirrel'}, {'id': 4267, 'synset': 'fox_squirrel.n.01', 'name': 'fox_squirrel'}, {'id': 4268, 'synset': 'black_squirrel.n.01', 'name': 'black_squirrel'}, {'id': 4269, 'synset': 'red_squirrel.n.02', 'name': 'red_squirrel'}, {'id': 4270, 'synset': 'american_red_squirrel.n.01', 'name': 'American_red_squirrel'}, {'id': 4271, 'synset': 'chickeree.n.01', 'name': 'chickeree'}, {'id': 4272, 'synset': 'antelope_squirrel.n.01', 'name': 'antelope_squirrel'}, {'id': 4273, 'synset': 'ground_squirrel.n.02', 'name': 'ground_squirrel'}, {'id': 4274, 'synset': 'mantled_ground_squirrel.n.01', 'name': 'mantled_ground_squirrel'}, {'id': 4275, 'synset': 'suslik.n.01', 'name': 'suslik'}, {'id': 4276, 'synset': 'flickertail.n.01', 'name': 'flickertail'}, {'id': 4277, 'synset': 'rock_squirrel.n.01', 'name': 'rock_squirrel'}, {'id': 4278, 'synset': 'arctic_ground_squirrel.n.01', 'name': 'Arctic_ground_squirrel'}, {'id': 4279, 'synset': 'prairie_dog.n.01', 'name': 'prairie_dog'}, {'id': 4280, 'synset': 'blacktail_prairie_dog.n.01', 'name': 'blacktail_prairie_dog'}, {'id': 4281, 'synset': 'whitetail_prairie_dog.n.01', 'name': 'whitetail_prairie_dog'}, {'id': 4282, 'synset': 'eastern_chipmunk.n.01', 'name': 'eastern_chipmunk'}, {'id': 4283, 'synset': 'chipmunk.n.01', 'name': 'chipmunk'}, {'id': 4284, 'synset': 'baronduki.n.01', 'name': 'baronduki'}, {'id': 4285, 'synset': 'american_flying_squirrel.n.01', 'name': 'American_flying_squirrel'}, {'id': 4286, 'synset': 'southern_flying_squirrel.n.01', 'name': 'southern_flying_squirrel'}, {'id': 4287, 'synset': 'northern_flying_squirrel.n.01', 'name': 'northern_flying_squirrel'}, {'id': 4288, 'synset': 'marmot.n.01', 'name': 'marmot'}, {'id': 4289, 'synset': 'groundhog.n.01', 'name': 'groundhog'}, {'id': 4290, 'synset': 'hoary_marmot.n.01', 'name': 'hoary_marmot'}, {'id': 4291, 'synset': 'yellowbelly_marmot.n.01', 'name': 'yellowbelly_marmot'}, {'id': 4292, 'synset': 'asiatic_flying_squirrel.n.01', 'name': 'Asiatic_flying_squirrel'}, {'id': 4293, 'synset': 'beaver.n.07', 'name': 'beaver'}, {'id': 4294, 'synset': 'old_world_beaver.n.01', 'name': 'Old_World_beaver'}, {'id': 4295, 'synset': 'new_world_beaver.n.01', 'name': 'New_World_beaver'}, {'id': 4296, 'synset': 'mountain_beaver.n.01', 'name': 
'mountain_beaver'}, {'id': 4297, 'synset': 'cavy.n.01', 'name': 'cavy'}, {'id': 4298, 'synset': 'guinea_pig.n.02', 'name': 'guinea_pig'}, {'id': 4299, 'synset': 'aperea.n.01', 'name': 'aperea'}, {'id': 4300, 'synset': 'mara.n.02', 'name': 'mara'}, {'id': 4301, 'synset': 'capybara.n.01', 'name': 'capybara'}, {'id': 4302, 'synset': 'agouti.n.01', 'name': 'agouti'}, {'id': 4303, 'synset': 'paca.n.01', 'name': 'paca'}, {'id': 4304, 'synset': 'mountain_paca.n.01', 'name': 'mountain_paca'}, {'id': 4305, 'synset': 'coypu.n.01', 'name': 'coypu'}, {'id': 4306, 'synset': 'chinchilla.n.03', 'name': 'chinchilla'}, {'id': 4307, 'synset': 'mountain_chinchilla.n.01', 'name': 'mountain_chinchilla'}, {'id': 4308, 'synset': 'viscacha.n.01', 'name': 'viscacha'}, {'id': 4309, 'synset': 'abrocome.n.01', 'name': 'abrocome'}, {'id': 4310, 'synset': 'mole_rat.n.02', 'name': 'mole_rat'}, {'id': 4311, 'synset': 'mole_rat.n.01', 'name': 'mole_rat'}, {'id': 4312, 'synset': 'sand_rat.n.01', 'name': 'sand_rat'}, {'id': 4313, 'synset': 'naked_mole_rat.n.01', 'name': 'naked_mole_rat'}, {'id': 4314, 'synset': 'queen.n.09', 'name': 'queen'}, {'id': 4315, 'synset': 'damaraland_mole_rat.n.01', 'name': 'Damaraland_mole_rat'}, {'id': 4316, 'synset': 'ungulata.n.01', 'name': 'Ungulata'}, {'id': 4317, 'synset': 'ungulate.n.01', 'name': 'ungulate'}, {'id': 4318, 'synset': 'unguiculate.n.01', 'name': 'unguiculate'}, {'id': 4319, 'synset': 'dinoceras.n.01', 'name': 'dinoceras'}, {'id': 4320, 'synset': 'hyrax.n.01', 'name': 'hyrax'}, {'id': 4321, 'synset': 'rock_hyrax.n.01', 'name': 'rock_hyrax'}, {'id': 4322, 'synset': 'odd-toed_ungulate.n.01', 'name': 'odd-toed_ungulate'}, {'id': 4323, 'synset': 'equine.n.01', 'name': 'equine'}, {'id': 4324, 'synset': 'roan.n.02', 'name': 'roan'}, {'id': 4325, 'synset': 'stablemate.n.01', 'name': 'stablemate'}, {'id': 4326, 'synset': 'gee-gee.n.01', 'name': 'gee-gee'}, {'id': 4327, 'synset': 'eohippus.n.01', 'name': 'eohippus'}, {'id': 4328, 'synset': 'filly.n.01', 'name': 'filly'}, {'id': 4329, 'synset': 'colt.n.01', 'name': 'colt'}, {'id': 4330, 'synset': 'male_horse.n.01', 'name': 'male_horse'}, {'id': 4331, 'synset': 'ridgeling.n.01', 'name': 'ridgeling'}, {'id': 4332, 'synset': 'stallion.n.01', 'name': 'stallion'}, {'id': 4333, 'synset': 'stud.n.04', 'name': 'stud'}, {'id': 4334, 'synset': 'gelding.n.01', 'name': 'gelding'}, {'id': 4335, 'synset': 'mare.n.01', 'name': 'mare'}, {'id': 4336, 'synset': 'broodmare.n.01', 'name': 'broodmare'}, {'id': 4337, 'synset': 'saddle_horse.n.01', 'name': 'saddle_horse'}, {'id': 4338, 'synset': 'remount.n.01', 'name': 'remount'}, {'id': 4339, 'synset': 'palfrey.n.01', 'name': 'palfrey'}, {'id': 4340, 'synset': 'warhorse.n.03', 'name': 'warhorse'}, {'id': 4341, 'synset': 'cavalry_horse.n.01', 'name': 'cavalry_horse'}, {'id': 4342, 'synset': 'charger.n.01', 'name': 'charger'}, {'id': 4343, 'synset': 'steed.n.01', 'name': 'steed'}, {'id': 4344, 'synset': 'prancer.n.01', 'name': 'prancer'}, {'id': 4345, 'synset': 'hack.n.08', 'name': 'hack'}, {'id': 4346, 'synset': 'cow_pony.n.01', 'name': 'cow_pony'}, {'id': 4347, 'synset': 'quarter_horse.n.01', 'name': 'quarter_horse'}, {'id': 4348, 'synset': 'morgan.n.06', 'name': 'Morgan'}, {'id': 4349, 'synset': 'tennessee_walker.n.01', 'name': 'Tennessee_walker'}, {'id': 4350, 'synset': 'american_saddle_horse.n.01', 'name': 'American_saddle_horse'}, {'id': 4351, 'synset': 'appaloosa.n.01', 'name': 'Appaloosa'}, {'id': 4352, 'synset': 'arabian.n.02', 'name': 'Arabian'}, {'id': 4353, 'synset': 'lippizan.n.01', 'name': 
'Lippizan'}, {'id': 4354, 'synset': 'pony.n.01', 'name': 'pony'}, {'id': 4355, 'synset': 'polo_pony.n.01', 'name': 'polo_pony'}, {'id': 4356, 'synset': 'mustang.n.01', 'name': 'mustang'}, {'id': 4357, 'synset': 'bronco.n.01', 'name': 'bronco'}, {'id': 4358, 'synset': 'bucking_bronco.n.01', 'name': 'bucking_bronco'}, {'id': 4359, 'synset': 'buckskin.n.01', 'name': 'buckskin'}, {'id': 4360, 'synset': 'crowbait.n.01', 'name': 'crowbait'}, {'id': 4361, 'synset': 'dun.n.01', 'name': 'dun'}, {'id': 4362, 'synset': 'grey.n.07', 'name': 'grey'}, {'id': 4363, 'synset': 'wild_horse.n.01', 'name': 'wild_horse'}, {'id': 4364, 'synset': 'tarpan.n.01', 'name': 'tarpan'}, {'id': 4365, 'synset': "przewalski's_horse.n.01", 'name': "Przewalski's_horse"}, {'id': 4366, 'synset': 'cayuse.n.01', 'name': 'cayuse'}, {'id': 4367, 'synset': 'hack.n.07', 'name': 'hack'}, {'id': 4368, 'synset': 'hack.n.06', 'name': 'hack'}, {'id': 4369, 'synset': 'plow_horse.n.01', 'name': 'plow_horse'}, {'id': 4370, 'synset': 'shetland_pony.n.01', 'name': 'Shetland_pony'}, {'id': 4371, 'synset': 'welsh_pony.n.01', 'name': 'Welsh_pony'}, {'id': 4372, 'synset': 'exmoor.n.02', 'name': 'Exmoor'}, {'id': 4373, 'synset': 'racehorse.n.01', 'name': 'racehorse'}, {'id': 4374, 'synset': 'thoroughbred.n.02', 'name': 'thoroughbred'}, {'id': 4375, 'synset': 'steeplechaser.n.01', 'name': 'steeplechaser'}, {'id': 4376, 'synset': 'racer.n.03', 'name': 'racer'}, {'id': 4377, 'synset': 'finisher.n.06', 'name': 'finisher'}, {'id': 4378, 'synset': 'pony.n.02', 'name': 'pony'}, {'id': 4379, 'synset': 'yearling.n.02', 'name': 'yearling'}, {'id': 4380, 'synset': 'dark_horse.n.02', 'name': 'dark_horse'}, {'id': 4381, 'synset': 'mudder.n.01', 'name': 'mudder'}, {'id': 4382, 'synset': 'nonstarter.n.02', 'name': 'nonstarter'}, {'id': 4383, 'synset': 'stalking-horse.n.04', 'name': 'stalking-horse'}, {'id': 4384, 'synset': 'harness_horse.n.01', 'name': 'harness_horse'}, {'id': 4385, 'synset': 'cob.n.02', 'name': 'cob'}, {'id': 4386, 'synset': 'hackney.n.02', 'name': 'hackney'}, {'id': 4387, 'synset': 'workhorse.n.02', 'name': 'workhorse'}, {'id': 4388, 'synset': 'draft_horse.n.01', 'name': 'draft_horse'}, {'id': 4389, 'synset': 'packhorse.n.01', 'name': 'packhorse'}, {'id': 4390, 'synset': 'carthorse.n.01', 'name': 'carthorse'}, {'id': 4391, 'synset': 'clydesdale.n.01', 'name': 'Clydesdale'}, {'id': 4392, 'synset': 'percheron.n.01', 'name': 'Percheron'}, {'id': 4393, 'synset': 'farm_horse.n.01', 'name': 'farm_horse'}, {'id': 4394, 'synset': 'shire.n.02', 'name': 'shire'}, {'id': 4395, 'synset': 'pole_horse.n.02', 'name': 'pole_horse'}, {'id': 4396, 'synset': 'post_horse.n.01', 'name': 'post_horse'}, {'id': 4397, 'synset': 'coach_horse.n.01', 'name': 'coach_horse'}, {'id': 4398, 'synset': 'pacer.n.02', 'name': 'pacer'}, {'id': 4399, 'synset': 'pacer.n.01', 'name': 'pacer'}, {'id': 4400, 'synset': 'trotting_horse.n.01', 'name': 'trotting_horse'}, {'id': 4401, 'synset': 'pole_horse.n.01', 'name': 'pole_horse'}, {'id': 4402, 'synset': 'stepper.n.03', 'name': 'stepper'}, {'id': 4403, 'synset': 'chestnut.n.06', 'name': 'chestnut'}, {'id': 4404, 'synset': 'liver_chestnut.n.01', 'name': 'liver_chestnut'}, {'id': 4405, 'synset': 'bay.n.07', 'name': 'bay'}, {'id': 4406, 'synset': 'sorrel.n.05', 'name': 'sorrel'}, {'id': 4407, 'synset': 'palomino.n.01', 'name': 'palomino'}, {'id': 4408, 'synset': 'pinto.n.01', 'name': 'pinto'}, {'id': 4409, 'synset': 'ass.n.03', 'name': 'ass'}, {'id': 4410, 'synset': 'burro.n.01', 'name': 'burro'}, {'id': 4411, 'synset': 'moke.n.01', 
'name': 'moke'}, {'id': 4412, 'synset': 'jack.n.12', 'name': 'jack'}, {'id': 4413, 'synset': 'jennet.n.01', 'name': 'jennet'}, {'id': 4414, 'synset': 'mule.n.01', 'name': 'mule'}, {'id': 4415, 'synset': 'hinny.n.01', 'name': 'hinny'}, {'id': 4416, 'synset': 'wild_ass.n.01', 'name': 'wild_ass'}, {'id': 4417, 'synset': 'african_wild_ass.n.01', 'name': 'African_wild_ass'}, {'id': 4418, 'synset': 'kiang.n.01', 'name': 'kiang'}, {'id': 4419, 'synset': 'onager.n.02', 'name': 'onager'}, {'id': 4420, 'synset': 'chigetai.n.01', 'name': 'chigetai'}, {'id': 4421, 'synset': 'common_zebra.n.01', 'name': 'common_zebra'}, {'id': 4422, 'synset': 'mountain_zebra.n.01', 'name': 'mountain_zebra'}, {'id': 4423, 'synset': "grevy's_zebra.n.01", 'name': "grevy's_zebra"}, {'id': 4424, 'synset': 'quagga.n.01', 'name': 'quagga'}, {'id': 4425, 'synset': 'indian_rhinoceros.n.01', 'name': 'Indian_rhinoceros'}, {'id': 4426, 'synset': 'woolly_rhinoceros.n.01', 'name': 'woolly_rhinoceros'}, {'id': 4427, 'synset': 'white_rhinoceros.n.01', 'name': 'white_rhinoceros'}, {'id': 4428, 'synset': 'black_rhinoceros.n.01', 'name': 'black_rhinoceros'}, {'id': 4429, 'synset': 'tapir.n.01', 'name': 'tapir'}, {'id': 4430, 'synset': 'new_world_tapir.n.01', 'name': 'New_World_tapir'}, {'id': 4431, 'synset': 'malayan_tapir.n.01', 'name': 'Malayan_tapir'}, {'id': 4432, 'synset': 'even-toed_ungulate.n.01', 'name': 'even-toed_ungulate'}, {'id': 4433, 'synset': 'swine.n.01', 'name': 'swine'}, {'id': 4434, 'synset': 'piglet.n.01', 'name': 'piglet'}, {'id': 4435, 'synset': 'sucking_pig.n.01', 'name': 'sucking_pig'}, {'id': 4436, 'synset': 'porker.n.01', 'name': 'porker'}, {'id': 4437, 'synset': 'boar.n.02', 'name': 'boar'}, {'id': 4438, 'synset': 'sow.n.01', 'name': 'sow'}, {'id': 4439, 'synset': 'razorback.n.01', 'name': 'razorback'}, {'id': 4440, 'synset': 'wild_boar.n.01', 'name': 'wild_boar'}, {'id': 4441, 'synset': 'babirusa.n.01', 'name': 'babirusa'}, {'id': 4442, 'synset': 'warthog.n.01', 'name': 'warthog'}, {'id': 4443, 'synset': 'peccary.n.01', 'name': 'peccary'}, {'id': 4444, 'synset': 'collared_peccary.n.01', 'name': 'collared_peccary'}, {'id': 4445, 'synset': 'white-lipped_peccary.n.01', 'name': 'white-lipped_peccary'}, {'id': 4446, 'synset': 'ruminant.n.01', 'name': 'ruminant'}, {'id': 4447, 'synset': 'bovid.n.01', 'name': 'bovid'}, {'id': 4448, 'synset': 'bovine.n.01', 'name': 'bovine'}, {'id': 4449, 'synset': 'ox.n.02', 'name': 'ox'}, {'id': 4450, 'synset': 'cattle.n.01', 'name': 'cattle'}, {'id': 4451, 'synset': 'ox.n.01', 'name': 'ox'}, {'id': 4452, 'synset': 'stirk.n.01', 'name': 'stirk'}, {'id': 4453, 'synset': 'bullock.n.02', 'name': 'bullock'}, {'id': 4454, 'synset': 'bull.n.01', 'name': 'bull'}, {'id': 4455, 'synset': 'cow.n.01', 'name': 'cow'}, {'id': 4456, 'synset': 'heifer.n.01', 'name': 'heifer'}, {'id': 4457, 'synset': 'bullock.n.01', 'name': 'bullock'}, {'id': 4458, 'synset': 'dogie.n.01', 'name': 'dogie'}, {'id': 4459, 'synset': 'maverick.n.02', 'name': 'maverick'}, {'id': 4460, 'synset': 'longhorn.n.01', 'name': 'longhorn'}, {'id': 4461, 'synset': 'brahman.n.04', 'name': 'Brahman'}, {'id': 4462, 'synset': 'zebu.n.01', 'name': 'zebu'}, {'id': 4463, 'synset': 'aurochs.n.02', 'name': 'aurochs'}, {'id': 4464, 'synset': 'yak.n.02', 'name': 'yak'}, {'id': 4465, 'synset': 'banteng.n.01', 'name': 'banteng'}, {'id': 4466, 'synset': 'welsh.n.03', 'name': 'Welsh'}, {'id': 4467, 'synset': 'red_poll.n.01', 'name': 'red_poll'}, {'id': 4468, 'synset': 'santa_gertrudis.n.01', 'name': 'Santa_Gertrudis'}, {'id': 4469, 'synset': 
'aberdeen_angus.n.01', 'name': 'Aberdeen_Angus'}, {'id': 4470, 'synset': 'africander.n.01', 'name': 'Africander'}, {'id': 4471, 'synset': 'dairy_cattle.n.01', 'name': 'dairy_cattle'}, {'id': 4472, 'synset': 'ayrshire.n.01', 'name': 'Ayrshire'}, {'id': 4473, 'synset': 'brown_swiss.n.01', 'name': 'Brown_Swiss'}, {'id': 4474, 'synset': 'charolais.n.01', 'name': 'Charolais'}, {'id': 4475, 'synset': 'jersey.n.05', 'name': 'Jersey'}, {'id': 4476, 'synset': 'devon.n.02', 'name': 'Devon'}, {'id': 4477, 'synset': 'grade.n.09', 'name': 'grade'}, {'id': 4478, 'synset': 'durham.n.02', 'name': 'Durham'}, {'id': 4479, 'synset': 'milking_shorthorn.n.01', 'name': 'milking_shorthorn'}, {'id': 4480, 'synset': 'galloway.n.02', 'name': 'Galloway'}, {'id': 4481, 'synset': 'friesian.n.01', 'name': 'Friesian'}, {'id': 4482, 'synset': 'guernsey.n.02', 'name': 'Guernsey'}, {'id': 4483, 'synset': 'hereford.n.01', 'name': 'Hereford'}, {'id': 4484, 'synset': 'cattalo.n.01', 'name': 'cattalo'}, {'id': 4485, 'synset': 'old_world_buffalo.n.01', 'name': 'Old_World_buffalo'}, {'id': 4486, 'synset': 'water_buffalo.n.01', 'name': 'water_buffalo'}, {'id': 4487, 'synset': 'indian_buffalo.n.01', 'name': 'Indian_buffalo'}, {'id': 4488, 'synset': 'carabao.n.01', 'name': 'carabao'}, {'id': 4489, 'synset': 'anoa.n.01', 'name': 'anoa'}, {'id': 4490, 'synset': 'tamarau.n.01', 'name': 'tamarau'}, {'id': 4491, 'synset': 'cape_buffalo.n.01', 'name': 'Cape_buffalo'}, {'id': 4492, 'synset': 'asian_wild_ox.n.01', 'name': 'Asian_wild_ox'}, {'id': 4493, 'synset': 'gaur.n.01', 'name': 'gaur'}, {'id': 4494, 'synset': 'gayal.n.01', 'name': 'gayal'}, {'id': 4495, 'synset': 'bison.n.01', 'name': 'bison'}, {'id': 4496, 'synset': 'american_bison.n.01', 'name': 'American_bison'}, {'id': 4497, 'synset': 'wisent.n.01', 'name': 'wisent'}, {'id': 4498, 'synset': 'musk_ox.n.01', 'name': 'musk_ox'}, {'id': 4499, 'synset': 'ewe.n.03', 'name': 'ewe'}, {'id': 4500, 'synset': 'wether.n.01', 'name': 'wether'}, {'id': 4501, 'synset': 'lambkin.n.01', 'name': 'lambkin'}, {'id': 4502, 'synset': 'baa-lamb.n.01', 'name': 'baa-lamb'}, {'id': 4503, 'synset': 'hog.n.02', 'name': 'hog'}, {'id': 4504, 'synset': 'teg.n.01', 'name': 'teg'}, {'id': 4505, 'synset': 'persian_lamb.n.02', 'name': 'Persian_lamb'}, {'id': 4506, 'synset': 'domestic_sheep.n.01', 'name': 'domestic_sheep'}, {'id': 4507, 'synset': 'cotswold.n.01', 'name': 'Cotswold'}, {'id': 4508, 'synset': 'hampshire.n.02', 'name': 'Hampshire'}, {'id': 4509, 'synset': 'lincoln.n.03', 'name': 'Lincoln'}, {'id': 4510, 'synset': 'exmoor.n.01', 'name': 'Exmoor'}, {'id': 4511, 'synset': 'cheviot.n.01', 'name': 'Cheviot'}, {'id': 4512, 'synset': 'broadtail.n.02', 'name': 'broadtail'}, {'id': 4513, 'synset': 'longwool.n.01', 'name': 'longwool'}, {'id': 4514, 'synset': 'merino.n.01', 'name': 'merino'}, {'id': 4515, 'synset': 'rambouillet.n.01', 'name': 'Rambouillet'}, {'id': 4516, 'synset': 'wild_sheep.n.01', 'name': 'wild_sheep'}, {'id': 4517, 'synset': 'argali.n.01', 'name': 'argali'}, {'id': 4518, 'synset': 'marco_polo_sheep.n.01', 'name': 'Marco_Polo_sheep'}, {'id': 4519, 'synset': 'urial.n.01', 'name': 'urial'}, {'id': 4520, 'synset': 'dall_sheep.n.01', 'name': 'Dall_sheep'}, {'id': 4521, 'synset': 'mountain_sheep.n.01', 'name': 'mountain_sheep'}, {'id': 4522, 'synset': 'bighorn.n.02', 'name': 'bighorn'}, {'id': 4523, 'synset': 'mouflon.n.01', 'name': 'mouflon'}, {'id': 4524, 'synset': 'aoudad.n.01', 'name': 'aoudad'}, {'id': 4525, 'synset': 'kid.n.05', 'name': 'kid'}, {'id': 4526, 'synset': 'billy.n.02', 'name': 
'billy'}, {'id': 4527, 'synset': 'nanny.n.02', 'name': 'nanny'}, {'id': 4528, 'synset': 'domestic_goat.n.01', 'name': 'domestic_goat'}, {'id': 4529, 'synset': 'cashmere_goat.n.01', 'name': 'Cashmere_goat'}, {'id': 4530, 'synset': 'angora.n.02', 'name': 'Angora'}, {'id': 4531, 'synset': 'wild_goat.n.01', 'name': 'wild_goat'}, {'id': 4532, 'synset': 'bezoar_goat.n.01', 'name': 'bezoar_goat'}, {'id': 4533, 'synset': 'markhor.n.01', 'name': 'markhor'}, {'id': 4534, 'synset': 'ibex.n.01', 'name': 'ibex'}, {'id': 4535, 'synset': 'goat_antelope.n.01', 'name': 'goat_antelope'}, {'id': 4536, 'synset': 'mountain_goat.n.01', 'name': 'mountain_goat'}, {'id': 4537, 'synset': 'goral.n.01', 'name': 'goral'}, {'id': 4538, 'synset': 'serow.n.01', 'name': 'serow'}, {'id': 4539, 'synset': 'chamois.n.02', 'name': 'chamois'}, {'id': 4540, 'synset': 'takin.n.01', 'name': 'takin'}, {'id': 4541, 'synset': 'antelope.n.01', 'name': 'antelope'}, {'id': 4542, 'synset': 'blackbuck.n.01', 'name': 'blackbuck'}, {'id': 4543, 'synset': 'gerenuk.n.01', 'name': 'gerenuk'}, {'id': 4544, 'synset': 'addax.n.01', 'name': 'addax'}, {'id': 4545, 'synset': 'gnu.n.01', 'name': 'gnu'}, {'id': 4546, 'synset': 'dik-dik.n.01', 'name': 'dik-dik'}, {'id': 4547, 'synset': 'hartebeest.n.01', 'name': 'hartebeest'}, {'id': 4548, 'synset': 'sassaby.n.01', 'name': 'sassaby'}, {'id': 4549, 'synset': 'impala.n.01', 'name': 'impala'}, {'id': 4550, 'synset': "thomson's_gazelle.n.01", 'name': "Thomson's_gazelle"}, {'id': 4551, 'synset': 'gazella_subgutturosa.n.01', 'name': 'Gazella_subgutturosa'}, {'id': 4552, 'synset': 'springbok.n.01', 'name': 'springbok'}, {'id': 4553, 'synset': 'bongo.n.02', 'name': 'bongo'}, {'id': 4554, 'synset': 'kudu.n.01', 'name': 'kudu'}, {'id': 4555, 'synset': 'greater_kudu.n.01', 'name': 'greater_kudu'}, {'id': 4556, 'synset': 'lesser_kudu.n.01', 'name': 'lesser_kudu'}, {'id': 4557, 'synset': 'harnessed_antelope.n.01', 'name': 'harnessed_antelope'}, {'id': 4558, 'synset': 'nyala.n.02', 'name': 'nyala'}, {'id': 4559, 'synset': 'mountain_nyala.n.01', 'name': 'mountain_nyala'}, {'id': 4560, 'synset': 'bushbuck.n.01', 'name': 'bushbuck'}, {'id': 4561, 'synset': 'nilgai.n.01', 'name': 'nilgai'}, {'id': 4562, 'synset': 'sable_antelope.n.01', 'name': 'sable_antelope'}, {'id': 4563, 'synset': 'saiga.n.01', 'name': 'saiga'}, {'id': 4564, 'synset': 'steenbok.n.01', 'name': 'steenbok'}, {'id': 4565, 'synset': 'eland.n.01', 'name': 'eland'}, {'id': 4566, 'synset': 'common_eland.n.01', 'name': 'common_eland'}, {'id': 4567, 'synset': 'giant_eland.n.01', 'name': 'giant_eland'}, {'id': 4568, 'synset': 'kob.n.01', 'name': 'kob'}, {'id': 4569, 'synset': 'lechwe.n.01', 'name': 'lechwe'}, {'id': 4570, 'synset': 'waterbuck.n.01', 'name': 'waterbuck'}, {'id': 4571, 'synset': 'puku.n.01', 'name': 'puku'}, {'id': 4572, 'synset': 'oryx.n.01', 'name': 'oryx'}, {'id': 4573, 'synset': 'gemsbok.n.01', 'name': 'gemsbok'}, {'id': 4574, 'synset': 'forest_goat.n.01', 'name': 'forest_goat'}, {'id': 4575, 'synset': 'pronghorn.n.01', 'name': 'pronghorn'}, {'id': 4576, 'synset': 'stag.n.02', 'name': 'stag'}, {'id': 4577, 'synset': 'royal.n.02', 'name': 'royal'}, {'id': 4578, 'synset': 'pricket.n.02', 'name': 'pricket'}, {'id': 4579, 'synset': 'fawn.n.02', 'name': 'fawn'}, {'id': 4580, 'synset': 'red_deer.n.01', 'name': 'red_deer'}, {'id': 4581, 'synset': 'hart.n.03', 'name': 'hart'}, {'id': 4582, 'synset': 'hind.n.02', 'name': 'hind'}, {'id': 4583, 'synset': 'brocket.n.02', 'name': 'brocket'}, {'id': 4584, 'synset': 'sambar.n.01', 'name': 'sambar'}, {'id': 
4585, 'synset': 'wapiti.n.01', 'name': 'wapiti'}, {'id': 4586, 'synset': 'japanese_deer.n.01', 'name': 'Japanese_deer'}, {'id': 4587, 'synset': 'virginia_deer.n.01', 'name': 'Virginia_deer'}, {'id': 4588, 'synset': 'mule_deer.n.01', 'name': 'mule_deer'}, {'id': 4589, 'synset': 'black-tailed_deer.n.01', 'name': 'black-tailed_deer'}, {'id': 4590, 'synset': 'fallow_deer.n.01', 'name': 'fallow_deer'}, {'id': 4591, 'synset': 'roe_deer.n.01', 'name': 'roe_deer'}, {'id': 4592, 'synset': 'roebuck.n.01', 'name': 'roebuck'}, {'id': 4593, 'synset': 'caribou.n.01', 'name': 'caribou'}, {'id': 4594, 'synset': 'woodland_caribou.n.01', 'name': 'woodland_caribou'}, {'id': 4595, 'synset': 'barren_ground_caribou.n.01', 'name': 'barren_ground_caribou'}, {'id': 4596, 'synset': 'brocket.n.01', 'name': 'brocket'}, {'id': 4597, 'synset': 'muntjac.n.01', 'name': 'muntjac'}, {'id': 4598, 'synset': 'musk_deer.n.01', 'name': 'musk_deer'}, {'id': 4599, 'synset': "pere_david's_deer.n.01", 'name': "pere_david's_deer"}, {'id': 4600, 'synset': 'chevrotain.n.01', 'name': 'chevrotain'}, {'id': 4601, 'synset': 'kanchil.n.01', 'name': 'kanchil'}, {'id': 4602, 'synset': 'napu.n.01', 'name': 'napu'}, {'id': 4603, 'synset': 'water_chevrotain.n.01', 'name': 'water_chevrotain'}, {'id': 4604, 'synset': 'arabian_camel.n.01', 'name': 'Arabian_camel'}, {'id': 4605, 'synset': 'bactrian_camel.n.01', 'name': 'Bactrian_camel'}, {'id': 4606, 'synset': 'llama.n.01', 'name': 'llama'}, {'id': 4607, 'synset': 'domestic_llama.n.01', 'name': 'domestic_llama'}, {'id': 4608, 'synset': 'guanaco.n.01', 'name': 'guanaco'}, {'id': 4609, 'synset': 'alpaca.n.03', 'name': 'alpaca'}, {'id': 4610, 'synset': 'vicuna.n.03', 'name': 'vicuna'}, {'id': 4611, 'synset': 'okapi.n.01', 'name': 'okapi'}, {'id': 4612, 'synset': 'musteline_mammal.n.01', 'name': 'musteline_mammal'}, {'id': 4613, 'synset': 'weasel.n.02', 'name': 'weasel'}, {'id': 4614, 'synset': 'ermine.n.02', 'name': 'ermine'}, {'id': 4615, 'synset': 'stoat.n.01', 'name': 'stoat'}, {'id': 4616, 'synset': 'new_world_least_weasel.n.01', 'name': 'New_World_least_weasel'}, {'id': 4617, 'synset': 'old_world_least_weasel.n.01', 'name': 'Old_World_least_weasel'}, {'id': 4618, 'synset': 'longtail_weasel.n.01', 'name': 'longtail_weasel'}, {'id': 4619, 'synset': 'mink.n.03', 'name': 'mink'}, {'id': 4620, 'synset': 'american_mink.n.01', 'name': 'American_mink'}, {'id': 4621, 'synset': 'polecat.n.02', 'name': 'polecat'}, {'id': 4622, 'synset': 'black-footed_ferret.n.01', 'name': 'black-footed_ferret'}, {'id': 4623, 'synset': 'muishond.n.01', 'name': 'muishond'}, {'id': 4624, 'synset': 'snake_muishond.n.01', 'name': 'snake_muishond'}, {'id': 4625, 'synset': 'striped_muishond.n.01', 'name': 'striped_muishond'}, {'id': 4626, 'synset': 'otter.n.02', 'name': 'otter'}, {'id': 4627, 'synset': 'river_otter.n.01', 'name': 'river_otter'}, {'id': 4628, 'synset': 'eurasian_otter.n.01', 'name': 'Eurasian_otter'}, {'id': 4629, 'synset': 'sea_otter.n.01', 'name': 'sea_otter'}, {'id': 4630, 'synset': 'skunk.n.04', 'name': 'skunk'}, {'id': 4631, 'synset': 'striped_skunk.n.01', 'name': 'striped_skunk'}, {'id': 4632, 'synset': 'hooded_skunk.n.01', 'name': 'hooded_skunk'}, {'id': 4633, 'synset': 'hog-nosed_skunk.n.01', 'name': 'hog-nosed_skunk'}, {'id': 4634, 'synset': 'spotted_skunk.n.01', 'name': 'spotted_skunk'}, {'id': 4635, 'synset': 'badger.n.02', 'name': 'badger'}, {'id': 4636, 'synset': 'american_badger.n.01', 'name': 'American_badger'}, {'id': 4637, 'synset': 'eurasian_badger.n.01', 'name': 'Eurasian_badger'}, {'id': 4638, 
'synset': 'ratel.n.01', 'name': 'ratel'}, {'id': 4639, 'synset': 'ferret_badger.n.01', 'name': 'ferret_badger'}, {'id': 4640, 'synset': 'hog_badger.n.01', 'name': 'hog_badger'}, {'id': 4641, 'synset': 'wolverine.n.03', 'name': 'wolverine'}, {'id': 4642, 'synset': 'glutton.n.02', 'name': 'glutton'}, {'id': 4643, 'synset': 'grison.n.01', 'name': 'grison'}, {'id': 4644, 'synset': 'marten.n.01', 'name': 'marten'}, {'id': 4645, 'synset': 'pine_marten.n.01', 'name': 'pine_marten'}, {'id': 4646, 'synset': 'sable.n.05', 'name': 'sable'}, {'id': 4647, 'synset': 'american_marten.n.01', 'name': 'American_marten'}, {'id': 4648, 'synset': 'stone_marten.n.01', 'name': 'stone_marten'}, {'id': 4649, 'synset': 'fisher.n.02', 'name': 'fisher'}, {'id': 4650, 'synset': 'yellow-throated_marten.n.01', 'name': 'yellow-throated_marten'}, {'id': 4651, 'synset': 'tayra.n.01', 'name': 'tayra'}, {'id': 4652, 'synset': 'fictional_animal.n.01', 'name': 'fictional_animal'}, {'id': 4653, 'synset': 'pachyderm.n.01', 'name': 'pachyderm'}, {'id': 4654, 'synset': 'edentate.n.01', 'name': 'edentate'}, {'id': 4655, 'synset': 'armadillo.n.01', 'name': 'armadillo'}, {'id': 4656, 'synset': 'peba.n.01', 'name': 'peba'}, {'id': 4657, 'synset': 'apar.n.01', 'name': 'apar'}, {'id': 4658, 'synset': 'tatouay.n.01', 'name': 'tatouay'}, {'id': 4659, 'synset': 'peludo.n.01', 'name': 'peludo'}, {'id': 4660, 'synset': 'giant_armadillo.n.01', 'name': 'giant_armadillo'}, {'id': 4661, 'synset': 'pichiciago.n.01', 'name': 'pichiciago'}, {'id': 4662, 'synset': 'sloth.n.02', 'name': 'sloth'}, {'id': 4663, 'synset': 'three-toed_sloth.n.01', 'name': 'three-toed_sloth'}, {'id': 4664, 'synset': 'two-toed_sloth.n.02', 'name': 'two-toed_sloth'}, {'id': 4665, 'synset': 'two-toed_sloth.n.01', 'name': 'two-toed_sloth'}, {'id': 4666, 'synset': 'megatherian.n.01', 'name': 'megatherian'}, {'id': 4667, 'synset': 'mylodontid.n.01', 'name': 'mylodontid'}, {'id': 4668, 'synset': 'anteater.n.02', 'name': 'anteater'}, {'id': 4669, 'synset': 'ant_bear.n.01', 'name': 'ant_bear'}, {'id': 4670, 'synset': 'silky_anteater.n.01', 'name': 'silky_anteater'}, {'id': 4671, 'synset': 'tamandua.n.01', 'name': 'tamandua'}, {'id': 4672, 'synset': 'pangolin.n.01', 'name': 'pangolin'}, {'id': 4673, 'synset': 'coronet.n.02', 'name': 'coronet'}, {'id': 4674, 'synset': 'scapular.n.01', 'name': 'scapular'}, {'id': 4675, 'synset': 'tadpole.n.01', 'name': 'tadpole'}, {'id': 4676, 'synset': 'primate.n.02', 'name': 'primate'}, {'id': 4677, 'synset': 'simian.n.01', 'name': 'simian'}, {'id': 4678, 'synset': 'ape.n.01', 'name': 'ape'}, {'id': 4679, 'synset': 'anthropoid.n.02', 'name': 'anthropoid'}, {'id': 4680, 'synset': 'anthropoid_ape.n.01', 'name': 'anthropoid_ape'}, {'id': 4681, 'synset': 'hominoid.n.01', 'name': 'hominoid'}, {'id': 4682, 'synset': 'hominid.n.01', 'name': 'hominid'}, {'id': 4683, 'synset': 'homo.n.02', 'name': 'homo'}, {'id': 4684, 'synset': 'world.n.08', 'name': 'world'}, {'id': 4685, 'synset': 'homo_erectus.n.01', 'name': 'Homo_erectus'}, {'id': 4686, 'synset': 'pithecanthropus.n.01', 'name': 'Pithecanthropus'}, {'id': 4687, 'synset': 'java_man.n.01', 'name': 'Java_man'}, {'id': 4688, 'synset': 'peking_man.n.01', 'name': 'Peking_man'}, {'id': 4689, 'synset': 'sinanthropus.n.01', 'name': 'Sinanthropus'}, {'id': 4690, 'synset': 'homo_soloensis.n.01', 'name': 'Homo_soloensis'}, {'id': 4691, 'synset': 'javanthropus.n.01', 'name': 'Javanthropus'}, {'id': 4692, 'synset': 'homo_habilis.n.01', 'name': 'Homo_habilis'}, {'id': 4693, 'synset': 'homo_sapiens.n.01', 'name': 
'Homo_sapiens'}, {'id': 4694, 'synset': 'neandertal_man.n.01', 'name': 'Neandertal_man'}, {'id': 4695, 'synset': 'cro-magnon.n.01', 'name': 'Cro-magnon'}, {'id': 4696, 'synset': 'homo_sapiens_sapiens.n.01', 'name': 'Homo_sapiens_sapiens'}, {'id': 4697, 'synset': 'australopithecine.n.01', 'name': 'australopithecine'}, {'id': 4698, 'synset': 'australopithecus_afarensis.n.01', 'name': 'Australopithecus_afarensis'}, {'id': 4699, 'synset': 'australopithecus_africanus.n.01', 'name': 'Australopithecus_africanus'}, {'id': 4700, 'synset': 'australopithecus_boisei.n.01', 'name': 'Australopithecus_boisei'}, {'id': 4701, 'synset': 'zinjanthropus.n.01', 'name': 'Zinjanthropus'}, {'id': 4702, 'synset': 'australopithecus_robustus.n.01', 'name': 'Australopithecus_robustus'}, {'id': 4703, 'synset': 'paranthropus.n.01', 'name': 'Paranthropus'}, {'id': 4704, 'synset': 'sivapithecus.n.01', 'name': 'Sivapithecus'}, {'id': 4705, 'synset': 'rudapithecus.n.01', 'name': 'rudapithecus'}, {'id': 4706, 'synset': 'proconsul.n.03', 'name': 'proconsul'}, {'id': 4707, 'synset': 'aegyptopithecus.n.01', 'name': 'Aegyptopithecus'}, {'id': 4708, 'synset': 'great_ape.n.01', 'name': 'great_ape'}, {'id': 4709, 'synset': 'orangutan.n.01', 'name': 'orangutan'}, {'id': 4710, 'synset': 'western_lowland_gorilla.n.01', 'name': 'western_lowland_gorilla'}, {'id': 4711, 'synset': 'eastern_lowland_gorilla.n.01', 'name': 'eastern_lowland_gorilla'}, {'id': 4712, 'synset': 'mountain_gorilla.n.01', 'name': 'mountain_gorilla'}, {'id': 4713, 'synset': 'silverback.n.01', 'name': 'silverback'}, {'id': 4714, 'synset': 'chimpanzee.n.01', 'name': 'chimpanzee'}, {'id': 4715, 'synset': 'western_chimpanzee.n.01', 'name': 'western_chimpanzee'}, {'id': 4716, 'synset': 'eastern_chimpanzee.n.01', 'name': 'eastern_chimpanzee'}, {'id': 4717, 'synset': 'central_chimpanzee.n.01', 'name': 'central_chimpanzee'}, {'id': 4718, 'synset': 'pygmy_chimpanzee.n.01', 'name': 'pygmy_chimpanzee'}, {'id': 4719, 'synset': 'lesser_ape.n.01', 'name': 'lesser_ape'}, {'id': 4720, 'synset': 'gibbon.n.02', 'name': 'gibbon'}, {'id': 4721, 'synset': 'siamang.n.01', 'name': 'siamang'}, {'id': 4722, 'synset': 'old_world_monkey.n.01', 'name': 'Old_World_monkey'}, {'id': 4723, 'synset': 'guenon.n.01', 'name': 'guenon'}, {'id': 4724, 'synset': 'talapoin.n.01', 'name': 'talapoin'}, {'id': 4725, 'synset': 'grivet.n.01', 'name': 'grivet'}, {'id': 4726, 'synset': 'vervet.n.01', 'name': 'vervet'}, {'id': 4727, 'synset': 'green_monkey.n.01', 'name': 'green_monkey'}, {'id': 4728, 'synset': 'mangabey.n.01', 'name': 'mangabey'}, {'id': 4729, 'synset': 'patas.n.01', 'name': 'patas'}, {'id': 4730, 'synset': 'chacma.n.01', 'name': 'chacma'}, {'id': 4731, 'synset': 'mandrill.n.01', 'name': 'mandrill'}, {'id': 4732, 'synset': 'drill.n.02', 'name': 'drill'}, {'id': 4733, 'synset': 'macaque.n.01', 'name': 'macaque'}, {'id': 4734, 'synset': 'rhesus.n.01', 'name': 'rhesus'}, {'id': 4735, 'synset': 'bonnet_macaque.n.01', 'name': 'bonnet_macaque'}, {'id': 4736, 'synset': 'barbary_ape.n.01', 'name': 'Barbary_ape'}, {'id': 4737, 'synset': 'crab-eating_macaque.n.01', 'name': 'crab-eating_macaque'}, {'id': 4738, 'synset': 'langur.n.01', 'name': 'langur'}, {'id': 4739, 'synset': 'entellus.n.01', 'name': 'entellus'}, {'id': 4740, 'synset': 'colobus.n.01', 'name': 'colobus'}, {'id': 4741, 'synset': 'guereza.n.01', 'name': 'guereza'}, {'id': 4742, 'synset': 'proboscis_monkey.n.01', 'name': 'proboscis_monkey'}, {'id': 4743, 'synset': 'new_world_monkey.n.01', 'name': 'New_World_monkey'}, {'id': 4744, 'synset': 
'marmoset.n.01', 'name': 'marmoset'}, {'id': 4745, 'synset': 'true_marmoset.n.01', 'name': 'true_marmoset'}, {'id': 4746, 'synset': 'pygmy_marmoset.n.01', 'name': 'pygmy_marmoset'}, {'id': 4747, 'synset': 'tamarin.n.01', 'name': 'tamarin'}, {'id': 4748, 'synset': 'silky_tamarin.n.01', 'name': 'silky_tamarin'}, {'id': 4749, 'synset': 'pinche.n.01', 'name': 'pinche'}, {'id': 4750, 'synset': 'capuchin.n.02', 'name': 'capuchin'}, {'id': 4751, 'synset': 'douroucouli.n.01', 'name': 'douroucouli'}, {'id': 4752, 'synset': 'howler_monkey.n.01', 'name': 'howler_monkey'}, {'id': 4753, 'synset': 'saki.n.03', 'name': 'saki'}, {'id': 4754, 'synset': 'uakari.n.01', 'name': 'uakari'}, {'id': 4755, 'synset': 'titi.n.03', 'name': 'titi'}, {'id': 4756, 'synset': 'spider_monkey.n.01', 'name': 'spider_monkey'}, {'id': 4757, 'synset': 'squirrel_monkey.n.01', 'name': 'squirrel_monkey'}, {'id': 4758, 'synset': 'woolly_monkey.n.01', 'name': 'woolly_monkey'}, {'id': 4759, 'synset': 'tree_shrew.n.01', 'name': 'tree_shrew'}, {'id': 4760, 'synset': 'prosimian.n.01', 'name': 'prosimian'}, {'id': 4761, 'synset': 'lemur.n.01', 'name': 'lemur'}, {'id': 4762, 'synset': 'madagascar_cat.n.01', 'name': 'Madagascar_cat'}, {'id': 4763, 'synset': 'aye-aye.n.01', 'name': 'aye-aye'}, {'id': 4764, 'synset': 'slender_loris.n.01', 'name': 'slender_loris'}, {'id': 4765, 'synset': 'slow_loris.n.01', 'name': 'slow_loris'}, {'id': 4766, 'synset': 'potto.n.02', 'name': 'potto'}, {'id': 4767, 'synset': 'angwantibo.n.01', 'name': 'angwantibo'}, {'id': 4768, 'synset': 'galago.n.01', 'name': 'galago'}, {'id': 4769, 'synset': 'indri.n.01', 'name': 'indri'}, {'id': 4770, 'synset': 'woolly_indris.n.01', 'name': 'woolly_indris'}, {'id': 4771, 'synset': 'tarsier.n.01', 'name': 'tarsier'}, {'id': 4772, 'synset': 'tarsius_syrichta.n.01', 'name': 'Tarsius_syrichta'}, {'id': 4773, 'synset': 'tarsius_glis.n.01', 'name': 'Tarsius_glis'}, {'id': 4774, 'synset': 'flying_lemur.n.01', 'name': 'flying_lemur'}, {'id': 4775, 'synset': 'cynocephalus_variegatus.n.01', 'name': 'Cynocephalus_variegatus'}, {'id': 4776, 'synset': 'proboscidean.n.01', 'name': 'proboscidean'}, {'id': 4777, 'synset': 'rogue_elephant.n.01', 'name': 'rogue_elephant'}, {'id': 4778, 'synset': 'indian_elephant.n.01', 'name': 'Indian_elephant'}, {'id': 4779, 'synset': 'african_elephant.n.01', 'name': 'African_elephant'}, {'id': 4780, 'synset': 'woolly_mammoth.n.01', 'name': 'woolly_mammoth'}, {'id': 4781, 'synset': 'columbian_mammoth.n.01', 'name': 'columbian_mammoth'}, {'id': 4782, 'synset': 'imperial_mammoth.n.01', 'name': 'imperial_mammoth'}, {'id': 4783, 'synset': 'mastodon.n.01', 'name': 'mastodon'}, {'id': 4784, 'synset': 'plantigrade_mammal.n.01', 'name': 'plantigrade_mammal'}, {'id': 4785, 'synset': 'digitigrade_mammal.n.01', 'name': 'digitigrade_mammal'}, {'id': 4786, 'synset': 'procyonid.n.01', 'name': 'procyonid'}, {'id': 4787, 'synset': 'raccoon.n.02', 'name': 'raccoon'}, {'id': 4788, 'synset': 'common_raccoon.n.01', 'name': 'common_raccoon'}, {'id': 4789, 'synset': 'crab-eating_raccoon.n.01', 'name': 'crab-eating_raccoon'}, {'id': 4790, 'synset': 'bassarisk.n.01', 'name': 'bassarisk'}, {'id': 4791, 'synset': 'kinkajou.n.01', 'name': 'kinkajou'}, {'id': 4792, 'synset': 'coati.n.01', 'name': 'coati'}, {'id': 4793, 'synset': 'lesser_panda.n.01', 'name': 'lesser_panda'}, {'id': 4794, 'synset': 'twitterer.n.01', 'name': 'twitterer'}, {'id': 4795, 'synset': 'fingerling.n.01', 'name': 'fingerling'}, {'id': 4796, 'synset': 'game_fish.n.01', 'name': 'game_fish'}, {'id': 4797, 'synset': 
'food_fish.n.01', 'name': 'food_fish'}, {'id': 4798, 'synset': 'rough_fish.n.01', 'name': 'rough_fish'}, {'id': 4799, 'synset': 'groundfish.n.01', 'name': 'groundfish'}, {'id': 4800, 'synset': 'young_fish.n.01', 'name': 'young_fish'}, {'id': 4801, 'synset': 'parr.n.03', 'name': 'parr'}, {'id': 4802, 'synset': 'mouthbreeder.n.01', 'name': 'mouthbreeder'}, {'id': 4803, 'synset': 'spawner.n.01', 'name': 'spawner'}, {'id': 4804, 'synset': 'barracouta.n.01', 'name': 'barracouta'}, {'id': 4805, 'synset': 'crossopterygian.n.01', 'name': 'crossopterygian'}, {'id': 4806, 'synset': 'coelacanth.n.01', 'name': 'coelacanth'}, {'id': 4807, 'synset': 'lungfish.n.01', 'name': 'lungfish'}, {'id': 4808, 'synset': 'ceratodus.n.01', 'name': 'ceratodus'}, {'id': 4809, 'synset': 'catfish.n.03', 'name': 'catfish'}, {'id': 4810, 'synset': 'silurid.n.01', 'name': 'silurid'}, {'id': 4811, 'synset': 'european_catfish.n.01', 'name': 'European_catfish'}, {'id': 4812, 'synset': 'electric_catfish.n.01', 'name': 'electric_catfish'}, {'id': 4813, 'synset': 'bullhead.n.02', 'name': 'bullhead'}, {'id': 4814, 'synset': 'horned_pout.n.01', 'name': 'horned_pout'}, {'id': 4815, 'synset': 'brown_bullhead.n.01', 'name': 'brown_bullhead'}, {'id': 4816, 'synset': 'channel_catfish.n.01', 'name': 'channel_catfish'}, {'id': 4817, 'synset': 'blue_catfish.n.01', 'name': 'blue_catfish'}, {'id': 4818, 'synset': 'flathead_catfish.n.01', 'name': 'flathead_catfish'}, {'id': 4819, 'synset': 'armored_catfish.n.01', 'name': 'armored_catfish'}, {'id': 4820, 'synset': 'sea_catfish.n.01', 'name': 'sea_catfish'}, {'id': 4821, 'synset': 'gadoid.n.01', 'name': 'gadoid'}, {'id': 4822, 'synset': 'cod.n.03', 'name': 'cod'}, {'id': 4823, 'synset': 'codling.n.01', 'name': 'codling'}, {'id': 4824, 'synset': 'atlantic_cod.n.01', 'name': 'Atlantic_cod'}, {'id': 4825, 'synset': 'pacific_cod.n.01', 'name': 'Pacific_cod'}, {'id': 4826, 'synset': 'whiting.n.06', 'name': 'whiting'}, {'id': 4827, 'synset': 'burbot.n.01', 'name': 'burbot'}, {'id': 4828, 'synset': 'haddock.n.02', 'name': 'haddock'}, {'id': 4829, 'synset': 'pollack.n.03', 'name': 'pollack'}, {'id': 4830, 'synset': 'hake.n.02', 'name': 'hake'}, {'id': 4831, 'synset': 'silver_hake.n.01', 'name': 'silver_hake'}, {'id': 4832, 'synset': 'ling.n.04', 'name': 'ling'}, {'id': 4833, 'synset': 'cusk.n.02', 'name': 'cusk'}, {'id': 4834, 'synset': 'grenadier.n.02', 'name': 'grenadier'}, {'id': 4835, 'synset': 'eel.n.02', 'name': 'eel'}, {'id': 4836, 'synset': 'elver.n.02', 'name': 'elver'}, {'id': 4837, 'synset': 'common_eel.n.01', 'name': 'common_eel'}, {'id': 4838, 'synset': 'tuna.n.04', 'name': 'tuna'}, {'id': 4839, 'synset': 'moray.n.01', 'name': 'moray'}, {'id': 4840, 'synset': 'conger.n.01', 'name': 'conger'}, {'id': 4841, 'synset': 'teleost_fish.n.01', 'name': 'teleost_fish'}, {'id': 4842, 'synset': 'beaked_salmon.n.01', 'name': 'beaked_salmon'}, {'id': 4843, 'synset': 'clupeid_fish.n.01', 'name': 'clupeid_fish'}, {'id': 4844, 'synset': 'whitebait.n.02', 'name': 'whitebait'}, {'id': 4845, 'synset': 'brit.n.02', 'name': 'brit'}, {'id': 4846, 'synset': 'shad.n.02', 'name': 'shad'}, {'id': 4847, 'synset': 'common_american_shad.n.01', 'name': 'common_American_shad'}, {'id': 4848, 'synset': 'river_shad.n.01', 'name': 'river_shad'}, {'id': 4849, 'synset': 'allice_shad.n.01', 'name': 'allice_shad'}, {'id': 4850, 'synset': 'alewife.n.02', 'name': 'alewife'}, {'id': 4851, 'synset': 'menhaden.n.01', 'name': 'menhaden'}, {'id': 4852, 'synset': 'herring.n.02', 'name': 'herring'}, {'id': 4853, 'synset': 
'atlantic_herring.n.01', 'name': 'Atlantic_herring'}, {'id': 4854, 'synset': 'pacific_herring.n.01', 'name': 'Pacific_herring'}, {'id': 4855, 'synset': 'sardine.n.02', 'name': 'sardine'}, {'id': 4856, 'synset': 'sild.n.01', 'name': 'sild'}, {'id': 4857, 'synset': 'brisling.n.02', 'name': 'brisling'}, {'id': 4858, 'synset': 'pilchard.n.02', 'name': 'pilchard'}, {'id': 4859, 'synset': 'pacific_sardine.n.01', 'name': 'Pacific_sardine'}, {'id': 4860, 'synset': 'anchovy.n.02', 'name': 'anchovy'}, {'id': 4861, 'synset': 'mediterranean_anchovy.n.01', 'name': 'mediterranean_anchovy'}, {'id': 4862, 'synset': 'salmonid.n.01', 'name': 'salmonid'}, {'id': 4863, 'synset': 'parr.n.02', 'name': 'parr'}, {'id': 4864, 'synset': 'blackfish.n.02', 'name': 'blackfish'}, {'id': 4865, 'synset': 'redfish.n.03', 'name': 'redfish'}, {'id': 4866, 'synset': 'atlantic_salmon.n.02', 'name': 'Atlantic_salmon'}, {'id': 4867, 'synset': 'landlocked_salmon.n.01', 'name': 'landlocked_salmon'}, {'id': 4868, 'synset': 'sockeye.n.02', 'name': 'sockeye'}, {'id': 4869, 'synset': 'chinook.n.05', 'name': 'chinook'}, {'id': 4870, 'synset': 'coho.n.02', 'name': 'coho'}, {'id': 4871, 'synset': 'trout.n.02', 'name': 'trout'}, {'id': 4872, 'synset': 'brown_trout.n.01', 'name': 'brown_trout'}, {'id': 4873, 'synset': 'rainbow_trout.n.02', 'name': 'rainbow_trout'}, {'id': 4874, 'synset': 'sea_trout.n.03', 'name': 'sea_trout'}, {'id': 4875, 'synset': 'lake_trout.n.02', 'name': 'lake_trout'}, {'id': 4876, 'synset': 'brook_trout.n.02', 'name': 'brook_trout'}, {'id': 4877, 'synset': 'char.n.03', 'name': 'char'}, {'id': 4878, 'synset': 'arctic_char.n.01', 'name': 'Arctic_char'}, {'id': 4879, 'synset': 'whitefish.n.03', 'name': 'whitefish'}, {'id': 4880, 'synset': 'lake_whitefish.n.01', 'name': 'lake_whitefish'}, {'id': 4881, 'synset': 'cisco.n.02', 'name': 'cisco'}, {'id': 4882, 'synset': 'round_whitefish.n.01', 'name': 'round_whitefish'}, {'id': 4883, 'synset': 'smelt.n.02', 'name': 'smelt'}, {'id': 4884, 'synset': 'sparling.n.02', 'name': 'sparling'}, {'id': 4885, 'synset': 'capelin.n.01', 'name': 'capelin'}, {'id': 4886, 'synset': 'tarpon.n.01', 'name': 'tarpon'}, {'id': 4887, 'synset': 'ladyfish.n.01', 'name': 'ladyfish'}, {'id': 4888, 'synset': 'bonefish.n.01', 'name': 'bonefish'}, {'id': 4889, 'synset': 'argentine.n.01', 'name': 'argentine'}, {'id': 4890, 'synset': 'lanternfish.n.01', 'name': 'lanternfish'}, {'id': 4891, 'synset': 'lizardfish.n.01', 'name': 'lizardfish'}, {'id': 4892, 'synset': 'lancetfish.n.01', 'name': 'lancetfish'}, {'id': 4893, 'synset': 'opah.n.01', 'name': 'opah'}, {'id': 4894, 'synset': 'new_world_opah.n.01', 'name': 'New_World_opah'}, {'id': 4895, 'synset': 'ribbonfish.n.02', 'name': 'ribbonfish'}, {'id': 4896, 'synset': 'dealfish.n.01', 'name': 'dealfish'}, {'id': 4897, 'synset': 'oarfish.n.01', 'name': 'oarfish'}, {'id': 4898, 'synset': 'batfish.n.01', 'name': 'batfish'}, {'id': 4899, 'synset': 'goosefish.n.01', 'name': 'goosefish'}, {'id': 4900, 'synset': 'toadfish.n.01', 'name': 'toadfish'}, {'id': 4901, 'synset': 'oyster_fish.n.01', 'name': 'oyster_fish'}, {'id': 4902, 'synset': 'frogfish.n.01', 'name': 'frogfish'}, {'id': 4903, 'synset': 'sargassum_fish.n.01', 'name': 'sargassum_fish'}, {'id': 4904, 'synset': 'needlefish.n.01', 'name': 'needlefish'}, {'id': 4905, 'synset': 'timucu.n.01', 'name': 'timucu'}, {'id': 4906, 'synset': 'flying_fish.n.01', 'name': 'flying_fish'}, {'id': 4907, 'synset': 'monoplane_flying_fish.n.01', 'name': 'monoplane_flying_fish'}, {'id': 4908, 'synset': 'halfbeak.n.01', 'name': 
'halfbeak'}, {'id': 4909, 'synset': 'saury.n.01', 'name': 'saury'}, {'id': 4910, 'synset': 'spiny-finned_fish.n.01', 'name': 'spiny-finned_fish'}, {'id': 4911, 'synset': 'lingcod.n.02', 'name': 'lingcod'}, {'id': 4912, 'synset': 'percoid_fish.n.01', 'name': 'percoid_fish'}, {'id': 4913, 'synset': 'perch.n.07', 'name': 'perch'}, {'id': 4914, 'synset': 'climbing_perch.n.01', 'name': 'climbing_perch'}, {'id': 4915, 'synset': 'perch.n.06', 'name': 'perch'}, {'id': 4916, 'synset': 'yellow_perch.n.01', 'name': 'yellow_perch'}, {'id': 4917, 'synset': 'european_perch.n.01', 'name': 'European_perch'}, {'id': 4918, 'synset': 'pike-perch.n.01', 'name': 'pike-perch'}, {'id': 4919, 'synset': 'walleye.n.02', 'name': 'walleye'}, {'id': 4920, 'synset': 'blue_pike.n.01', 'name': 'blue_pike'}, {'id': 4921, 'synset': 'snail_darter.n.01', 'name': 'snail_darter'}, {'id': 4922, 'synset': 'cusk-eel.n.01', 'name': 'cusk-eel'}, {'id': 4923, 'synset': 'brotula.n.01', 'name': 'brotula'}, {'id': 4924, 'synset': 'pearlfish.n.01', 'name': 'pearlfish'}, {'id': 4925, 'synset': 'robalo.n.01', 'name': 'robalo'}, {'id': 4926, 'synset': 'snook.n.01', 'name': 'snook'}, {'id': 4927, 'synset': 'pike.n.05', 'name': 'pike'}, {'id': 4928, 'synset': 'northern_pike.n.01', 'name': 'northern_pike'}, {'id': 4929, 'synset': 'muskellunge.n.02', 'name': 'muskellunge'}, {'id': 4930, 'synset': 'pickerel.n.02', 'name': 'pickerel'}, {'id': 4931, 'synset': 'chain_pickerel.n.01', 'name': 'chain_pickerel'}, {'id': 4932, 'synset': 'redfin_pickerel.n.01', 'name': 'redfin_pickerel'}, {'id': 4933, 'synset': 'sunfish.n.03', 'name': 'sunfish'}, {'id': 4934, 'synset': 'crappie.n.02', 'name': 'crappie'}, {'id': 4935, 'synset': 'black_crappie.n.01', 'name': 'black_crappie'}, {'id': 4936, 'synset': 'white_crappie.n.01', 'name': 'white_crappie'}, {'id': 4937, 'synset': 'freshwater_bream.n.02', 'name': 'freshwater_bream'}, {'id': 4938, 'synset': 'pumpkinseed.n.01', 'name': 'pumpkinseed'}, {'id': 4939, 'synset': 'bluegill.n.01', 'name': 'bluegill'}, {'id': 4940, 'synset': 'spotted_sunfish.n.01', 'name': 'spotted_sunfish'}, {'id': 4941, 'synset': 'freshwater_bass.n.02', 'name': 'freshwater_bass'}, {'id': 4942, 'synset': 'rock_bass.n.02', 'name': 'rock_bass'}, {'id': 4943, 'synset': 'black_bass.n.02', 'name': 'black_bass'}, {'id': 4944, 'synset': 'kentucky_black_bass.n.01', 'name': 'Kentucky_black_bass'}, {'id': 4945, 'synset': 'smallmouth.n.01', 'name': 'smallmouth'}, {'id': 4946, 'synset': 'largemouth.n.01', 'name': 'largemouth'}, {'id': 4947, 'synset': 'bass.n.08', 'name': 'bass'}, {'id': 4948, 'synset': 'serranid_fish.n.01', 'name': 'serranid_fish'}, {'id': 4949, 'synset': 'white_perch.n.01', 'name': 'white_perch'}, {'id': 4950, 'synset': 'yellow_bass.n.01', 'name': 'yellow_bass'}, {'id': 4951, 'synset': 'blackmouth_bass.n.01', 'name': 'blackmouth_bass'}, {'id': 4952, 'synset': 'rock_sea_bass.n.01', 'name': 'rock_sea_bass'}, {'id': 4953, 'synset': 'striped_bass.n.02', 'name': 'striped_bass'}, {'id': 4954, 'synset': 'stone_bass.n.01', 'name': 'stone_bass'}, {'id': 4955, 'synset': 'grouper.n.02', 'name': 'grouper'}, {'id': 4956, 'synset': 'hind.n.01', 'name': 'hind'}, {'id': 4957, 'synset': 'rock_hind.n.01', 'name': 'rock_hind'}, {'id': 4958, 'synset': 'creole-fish.n.01', 'name': 'creole-fish'}, {'id': 4959, 'synset': 'jewfish.n.02', 'name': 'jewfish'}, {'id': 4960, 'synset': 'soapfish.n.01', 'name': 'soapfish'}, {'id': 4961, 'synset': 'surfperch.n.01', 'name': 'surfperch'}, {'id': 4962, 'synset': 'rainbow_seaperch.n.01', 'name': 'rainbow_seaperch'}, {'id': 
4963, 'synset': 'bigeye.n.01', 'name': 'bigeye'}, {'id': 4964, 'synset': 'catalufa.n.01', 'name': 'catalufa'}, {'id': 4965, 'synset': 'cardinalfish.n.01', 'name': 'cardinalfish'}, {'id': 4966, 'synset': 'flame_fish.n.01', 'name': 'flame_fish'}, {'id': 4967, 'synset': 'tilefish.n.02', 'name': 'tilefish'}, {'id': 4968, 'synset': 'bluefish.n.01', 'name': 'bluefish'}, {'id': 4969, 'synset': 'cobia.n.01', 'name': 'cobia'}, {'id': 4970, 'synset': 'remora.n.01', 'name': 'remora'}, {'id': 4971, 'synset': 'sharksucker.n.01', 'name': 'sharksucker'}, {'id': 4972, 'synset': 'whale_sucker.n.01', 'name': 'whale_sucker'}, {'id': 4973, 'synset': 'carangid_fish.n.01', 'name': 'carangid_fish'}, {'id': 4974, 'synset': 'jack.n.11', 'name': 'jack'}, {'id': 4975, 'synset': 'crevalle_jack.n.01', 'name': 'crevalle_jack'}, {'id': 4976, 'synset': 'yellow_jack.n.03', 'name': 'yellow_jack'}, {'id': 4977, 'synset': 'runner.n.10', 'name': 'runner'}, {'id': 4978, 'synset': 'rainbow_runner.n.01', 'name': 'rainbow_runner'}, {'id': 4979, 'synset': 'leatherjacket.n.02', 'name': 'leatherjacket'}, {'id': 4980, 'synset': 'threadfish.n.01', 'name': 'threadfish'}, {'id': 4981, 'synset': 'moonfish.n.01', 'name': 'moonfish'}, {'id': 4982, 'synset': 'lookdown.n.01', 'name': 'lookdown'}, {'id': 4983, 'synset': 'amberjack.n.01', 'name': 'amberjack'}, {'id': 4984, 'synset': 'yellowtail.n.02', 'name': 'yellowtail'}, {'id': 4985, 'synset': 'kingfish.n.05', 'name': 'kingfish'}, {'id': 4986, 'synset': 'pompano.n.02', 'name': 'pompano'}, {'id': 4987, 'synset': 'florida_pompano.n.01', 'name': 'Florida_pompano'}, {'id': 4988, 'synset': 'permit.n.03', 'name': 'permit'}, {'id': 4989, 'synset': 'scad.n.01', 'name': 'scad'}, {'id': 4990, 'synset': 'horse_mackerel.n.03', 'name': 'horse_mackerel'}, {'id': 4991, 'synset': 'horse_mackerel.n.02', 'name': 'horse_mackerel'}, {'id': 4992, 'synset': 'bigeye_scad.n.01', 'name': 'bigeye_scad'}, {'id': 4993, 'synset': 'mackerel_scad.n.01', 'name': 'mackerel_scad'}, {'id': 4994, 'synset': 'round_scad.n.01', 'name': 'round_scad'}, {'id': 4995, 'synset': 'dolphinfish.n.02', 'name': 'dolphinfish'}, {'id': 4996, 'synset': 'coryphaena_hippurus.n.01', 'name': 'Coryphaena_hippurus'}, {'id': 4997, 'synset': 'coryphaena_equisetis.n.01', 'name': 'Coryphaena_equisetis'}, {'id': 4998, 'synset': 'pomfret.n.01', 'name': 'pomfret'}, {'id': 4999, 'synset': 'characin.n.01', 'name': 'characin'}, {'id': 5000, 'synset': 'tetra.n.01', 'name': 'tetra'}, {'id': 5001, 'synset': 'cardinal_tetra.n.01', 'name': 'cardinal_tetra'}, {'id': 5002, 'synset': 'piranha.n.02', 'name': 'piranha'}, {'id': 5003, 'synset': 'cichlid.n.01', 'name': 'cichlid'}, {'id': 5004, 'synset': 'bolti.n.01', 'name': 'bolti'}, {'id': 5005, 'synset': 'snapper.n.05', 'name': 'snapper'}, {'id': 5006, 'synset': 'red_snapper.n.02', 'name': 'red_snapper'}, {'id': 5007, 'synset': 'grey_snapper.n.01', 'name': 'grey_snapper'}, {'id': 5008, 'synset': 'mutton_snapper.n.01', 'name': 'mutton_snapper'}, {'id': 5009, 'synset': 'schoolmaster.n.03', 'name': 'schoolmaster'}, {'id': 5010, 'synset': 'yellowtail.n.01', 'name': 'yellowtail'}, {'id': 5011, 'synset': 'grunt.n.03', 'name': 'grunt'}, {'id': 5012, 'synset': 'margate.n.01', 'name': 'margate'}, {'id': 5013, 'synset': 'spanish_grunt.n.01', 'name': 'Spanish_grunt'}, {'id': 5014, 'synset': 'tomtate.n.01', 'name': 'tomtate'}, {'id': 5015, 'synset': 'cottonwick.n.01', 'name': 'cottonwick'}, {'id': 5016, 'synset': "sailor's-choice.n.02", 'name': "sailor's-choice"}, {'id': 5017, 'synset': 'porkfish.n.01', 'name': 'porkfish'}, 
{'id': 5018, 'synset': 'pompon.n.02', 'name': 'pompon'}, {'id': 5019, 'synset': 'pigfish.n.02', 'name': 'pigfish'}, {'id': 5020, 'synset': 'sparid.n.01', 'name': 'sparid'}, {'id': 5021, 'synset': 'sea_bream.n.02', 'name': 'sea_bream'}, {'id': 5022, 'synset': 'porgy.n.02', 'name': 'porgy'}, {'id': 5023, 'synset': 'red_porgy.n.01', 'name': 'red_porgy'}, {'id': 5024, 'synset': 'european_sea_bream.n.01', 'name': 'European_sea_bream'}, {'id': 5025, 'synset': 'atlantic_sea_bream.n.01', 'name': 'Atlantic_sea_bream'}, {'id': 5026, 'synset': 'sheepshead.n.01', 'name': 'sheepshead'}, {'id': 5027, 'synset': 'pinfish.n.01', 'name': 'pinfish'}, {'id': 5028, 'synset': 'sheepshead_porgy.n.01', 'name': 'sheepshead_porgy'}, {'id': 5029, 'synset': 'snapper.n.04', 'name': 'snapper'}, {'id': 5030, 'synset': 'black_bream.n.01', 'name': 'black_bream'}, {'id': 5031, 'synset': 'scup.n.04', 'name': 'scup'}, {'id': 5032, 'synset': 'scup.n.03', 'name': 'scup'}, {'id': 5033, 'synset': 'sciaenid_fish.n.01', 'name': 'sciaenid_fish'}, {'id': 5034, 'synset': 'striped_drum.n.01', 'name': 'striped_drum'}, {'id': 5035, 'synset': 'jackknife-fish.n.01', 'name': 'jackknife-fish'}, {'id': 5036, 'synset': 'silver_perch.n.01', 'name': 'silver_perch'}, {'id': 5037, 'synset': 'red_drum.n.01', 'name': 'red_drum'}, {'id': 5038, 'synset': 'mulloway.n.01', 'name': 'mulloway'}, {'id': 5039, 'synset': 'maigre.n.01', 'name': 'maigre'}, {'id': 5040, 'synset': 'croaker.n.02', 'name': 'croaker'}, {'id': 5041, 'synset': 'atlantic_croaker.n.01', 'name': 'Atlantic_croaker'}, {'id': 5042, 'synset': 'yellowfin_croaker.n.01', 'name': 'yellowfin_croaker'}, {'id': 5043, 'synset': 'whiting.n.04', 'name': 'whiting'}, {'id': 5044, 'synset': 'kingfish.n.04', 'name': 'kingfish'}, {'id': 5045, 'synset': 'king_whiting.n.01', 'name': 'king_whiting'}, {'id': 5046, 'synset': 'northern_whiting.n.01', 'name': 'northern_whiting'}, {'id': 5047, 'synset': 'corbina.n.01', 'name': 'corbina'}, {'id': 5048, 'synset': 'white_croaker.n.02', 'name': 'white_croaker'}, {'id': 5049, 'synset': 'white_croaker.n.01', 'name': 'white_croaker'}, {'id': 5050, 'synset': 'sea_trout.n.02', 'name': 'sea_trout'}, {'id': 5051, 'synset': 'weakfish.n.02', 'name': 'weakfish'}, {'id': 5052, 'synset': 'spotted_weakfish.n.01', 'name': 'spotted_weakfish'}, {'id': 5053, 'synset': 'mullet.n.03', 'name': 'mullet'}, {'id': 5054, 'synset': 'goatfish.n.01', 'name': 'goatfish'}, {'id': 5055, 'synset': 'red_goatfish.n.01', 'name': 'red_goatfish'}, {'id': 5056, 'synset': 'yellow_goatfish.n.01', 'name': 'yellow_goatfish'}, {'id': 5057, 'synset': 'mullet.n.02', 'name': 'mullet'}, {'id': 5058, 'synset': 'striped_mullet.n.01', 'name': 'striped_mullet'}, {'id': 5059, 'synset': 'white_mullet.n.01', 'name': 'white_mullet'}, {'id': 5060, 'synset': 'liza.n.01', 'name': 'liza'}, {'id': 5061, 'synset': 'silversides.n.01', 'name': 'silversides'}, {'id': 5062, 'synset': 'jacksmelt.n.01', 'name': 'jacksmelt'}, {'id': 5063, 'synset': 'barracuda.n.01', 'name': 'barracuda'}, {'id': 5064, 'synset': 'great_barracuda.n.01', 'name': 'great_barracuda'}, {'id': 5065, 'synset': 'sweeper.n.03', 'name': 'sweeper'}, {'id': 5066, 'synset': 'sea_chub.n.01', 'name': 'sea_chub'}, {'id': 5067, 'synset': 'bermuda_chub.n.01', 'name': 'Bermuda_chub'}, {'id': 5068, 'synset': 'spadefish.n.01', 'name': 'spadefish'}, {'id': 5069, 'synset': 'butterfly_fish.n.01', 'name': 'butterfly_fish'}, {'id': 5070, 'synset': 'chaetodon.n.01', 'name': 'chaetodon'}, {'id': 5071, 'synset': 'angelfish.n.01', 'name': 'angelfish'}, {'id': 5072, 'synset': 
'rock_beauty.n.01', 'name': 'rock_beauty'}, {'id': 5073, 'synset': 'damselfish.n.01', 'name': 'damselfish'}, {'id': 5074, 'synset': 'beaugregory.n.01', 'name': 'beaugregory'}, {'id': 5075, 'synset': 'anemone_fish.n.01', 'name': 'anemone_fish'}, {'id': 5076, 'synset': 'clown_anemone_fish.n.01', 'name': 'clown_anemone_fish'}, {'id': 5077, 'synset': 'sergeant_major.n.02', 'name': 'sergeant_major'}, {'id': 5078, 'synset': 'wrasse.n.01', 'name': 'wrasse'}, {'id': 5079, 'synset': 'pigfish.n.01', 'name': 'pigfish'}, {'id': 5080, 'synset': 'hogfish.n.01', 'name': 'hogfish'}, {'id': 5081, 'synset': 'slippery_dick.n.01', 'name': 'slippery_dick'}, {'id': 5082, 'synset': 'puddingwife.n.01', 'name': 'puddingwife'}, {'id': 5083, 'synset': 'bluehead.n.01', 'name': 'bluehead'}, {'id': 5084, 'synset': 'pearly_razorfish.n.01', 'name': 'pearly_razorfish'}, {'id': 5085, 'synset': 'tautog.n.01', 'name': 'tautog'}, {'id': 5086, 'synset': 'cunner.n.01', 'name': 'cunner'}, {'id': 5087, 'synset': 'parrotfish.n.01', 'name': 'parrotfish'}, {'id': 5088, 'synset': 'threadfin.n.01', 'name': 'threadfin'}, {'id': 5089, 'synset': 'jawfish.n.01', 'name': 'jawfish'}, {'id': 5090, 'synset': 'stargazer.n.03', 'name': 'stargazer'}, {'id': 5091, 'synset': 'sand_stargazer.n.01', 'name': 'sand_stargazer'}, {'id': 5092, 'synset': 'blenny.n.01', 'name': 'blenny'}, {'id': 5093, 'synset': 'shanny.n.01', 'name': 'shanny'}, {'id': 5094, 'synset': 'molly_miller.n.01', 'name': 'Molly_Miller'}, {'id': 5095, 'synset': 'clinid.n.01', 'name': 'clinid'}, {'id': 5096, 'synset': 'pikeblenny.n.01', 'name': 'pikeblenny'}, {'id': 5097, 'synset': 'bluethroat_pikeblenny.n.01', 'name': 'bluethroat_pikeblenny'}, {'id': 5098, 'synset': 'gunnel.n.02', 'name': 'gunnel'}, {'id': 5099, 'synset': 'rock_gunnel.n.01', 'name': 'rock_gunnel'}, {'id': 5100, 'synset': 'eelblenny.n.01', 'name': 'eelblenny'}, {'id': 5101, 'synset': 'wrymouth.n.01', 'name': 'wrymouth'}, {'id': 5102, 'synset': 'wolffish.n.01', 'name': 'wolffish'}, {'id': 5103, 'synset': 'viviparous_eelpout.n.01', 'name': 'viviparous_eelpout'}, {'id': 5104, 'synset': 'ocean_pout.n.01', 'name': 'ocean_pout'}, {'id': 5105, 'synset': 'sand_lance.n.01', 'name': 'sand_lance'}, {'id': 5106, 'synset': 'dragonet.n.01', 'name': 'dragonet'}, {'id': 5107, 'synset': 'goby.n.01', 'name': 'goby'}, {'id': 5108, 'synset': 'mudskipper.n.01', 'name': 'mudskipper'}, {'id': 5109, 'synset': 'sleeper.n.08', 'name': 'sleeper'}, {'id': 5110, 'synset': 'flathead.n.02', 'name': 'flathead'}, {'id': 5111, 'synset': 'archerfish.n.01', 'name': 'archerfish'}, {'id': 5112, 'synset': 'surgeonfish.n.01', 'name': 'surgeonfish'}, {'id': 5113, 'synset': 'gempylid.n.01', 'name': 'gempylid'}, {'id': 5114, 'synset': 'snake_mackerel.n.01', 'name': 'snake_mackerel'}, {'id': 5115, 'synset': 'escolar.n.01', 'name': 'escolar'}, {'id': 5116, 'synset': 'oilfish.n.01', 'name': 'oilfish'}, {'id': 5117, 'synset': 'cutlassfish.n.01', 'name': 'cutlassfish'}, {'id': 5118, 'synset': 'scombroid.n.01', 'name': 'scombroid'}, {'id': 5119, 'synset': 'mackerel.n.02', 'name': 'mackerel'}, {'id': 5120, 'synset': 'common_mackerel.n.01', 'name': 'common_mackerel'}, {'id': 5121, 'synset': 'spanish_mackerel.n.03', 'name': 'Spanish_mackerel'}, {'id': 5122, 'synset': 'chub_mackerel.n.01', 'name': 'chub_mackerel'}, {'id': 5123, 'synset': 'wahoo.n.03', 'name': 'wahoo'}, {'id': 5124, 'synset': 'spanish_mackerel.n.02', 'name': 'Spanish_mackerel'}, {'id': 5125, 'synset': 'king_mackerel.n.01', 'name': 'king_mackerel'}, {'id': 5126, 'synset': 'scomberomorus_maculatus.n.01', 
'name': 'Scomberomorus_maculatus'}, {'id': 5127, 'synset': 'cero.n.01', 'name': 'cero'}, {'id': 5128, 'synset': 'sierra.n.02', 'name': 'sierra'}, {'id': 5129, 'synset': 'tuna.n.03', 'name': 'tuna'}, {'id': 5130, 'synset': 'albacore.n.02', 'name': 'albacore'}, {'id': 5131, 'synset': 'bluefin.n.02', 'name': 'bluefin'}, {'id': 5132, 'synset': 'yellowfin.n.01', 'name': 'yellowfin'}, {'id': 5133, 'synset': 'bonito.n.03', 'name': 'bonito'}, {'id': 5134, 'synset': 'skipjack.n.02', 'name': 'skipjack'}, {'id': 5135, 'synset': 'chile_bonito.n.01', 'name': 'Chile_bonito'}, {'id': 5136, 'synset': 'skipjack.n.01', 'name': 'skipjack'}, {'id': 5137, 'synset': 'bonito.n.02', 'name': 'bonito'}, {'id': 5138, 'synset': 'swordfish.n.02', 'name': 'swordfish'}, {'id': 5139, 'synset': 'sailfish.n.02', 'name': 'sailfish'}, {'id': 5140, 'synset': 'atlantic_sailfish.n.01', 'name': 'Atlantic_sailfish'}, {'id': 5141, 'synset': 'billfish.n.02', 'name': 'billfish'}, {'id': 5142, 'synset': 'marlin.n.01', 'name': 'marlin'}, {'id': 5143, 'synset': 'blue_marlin.n.01', 'name': 'blue_marlin'}, {'id': 5144, 'synset': 'black_marlin.n.01', 'name': 'black_marlin'}, {'id': 5145, 'synset': 'striped_marlin.n.01', 'name': 'striped_marlin'}, {'id': 5146, 'synset': 'white_marlin.n.01', 'name': 'white_marlin'}, {'id': 5147, 'synset': 'spearfish.n.01', 'name': 'spearfish'}, {'id': 5148, 'synset': 'louvar.n.01', 'name': 'louvar'}, {'id': 5149, 'synset': 'dollarfish.n.01', 'name': 'dollarfish'}, {'id': 5150, 'synset': 'palometa.n.01', 'name': 'palometa'}, {'id': 5151, 'synset': 'harvestfish.n.01', 'name': 'harvestfish'}, {'id': 5152, 'synset': 'driftfish.n.01', 'name': 'driftfish'}, {'id': 5153, 'synset': 'barrelfish.n.01', 'name': 'barrelfish'}, {'id': 5154, 'synset': 'clingfish.n.01', 'name': 'clingfish'}, {'id': 5155, 'synset': 'tripletail.n.01', 'name': 'tripletail'}, {'id': 5156, 'synset': 'atlantic_tripletail.n.01', 'name': 'Atlantic_tripletail'}, {'id': 5157, 'synset': 'pacific_tripletail.n.01', 'name': 'Pacific_tripletail'}, {'id': 5158, 'synset': 'mojarra.n.01', 'name': 'mojarra'}, {'id': 5159, 'synset': 'yellowfin_mojarra.n.01', 'name': 'yellowfin_mojarra'}, {'id': 5160, 'synset': 'silver_jenny.n.01', 'name': 'silver_jenny'}, {'id': 5161, 'synset': 'whiting.n.03', 'name': 'whiting'}, {'id': 5162, 'synset': 'ganoid.n.01', 'name': 'ganoid'}, {'id': 5163, 'synset': 'bowfin.n.01', 'name': 'bowfin'}, {'id': 5164, 'synset': 'paddlefish.n.01', 'name': 'paddlefish'}, {'id': 5165, 'synset': 'chinese_paddlefish.n.01', 'name': 'Chinese_paddlefish'}, {'id': 5166, 'synset': 'sturgeon.n.01', 'name': 'sturgeon'}, {'id': 5167, 'synset': 'pacific_sturgeon.n.01', 'name': 'Pacific_sturgeon'}, {'id': 5168, 'synset': 'beluga.n.01', 'name': 'beluga'}, {'id': 5169, 'synset': 'gar.n.01', 'name': 'gar'}, {'id': 5170, 'synset': 'scorpaenoid.n.01', 'name': 'scorpaenoid'}, {'id': 5171, 'synset': 'scorpaenid.n.01', 'name': 'scorpaenid'}, {'id': 5172, 'synset': 'scorpionfish.n.01', 'name': 'scorpionfish'}, {'id': 5173, 'synset': 'plumed_scorpionfish.n.01', 'name': 'plumed_scorpionfish'}, {'id': 5174, 'synset': 'lionfish.n.01', 'name': 'lionfish'}, {'id': 5175, 'synset': 'stonefish.n.01', 'name': 'stonefish'}, {'id': 5176, 'synset': 'rockfish.n.02', 'name': 'rockfish'}, {'id': 5177, 'synset': 'copper_rockfish.n.01', 'name': 'copper_rockfish'}, {'id': 5178, 'synset': 'vermillion_rockfish.n.01', 'name': 'vermillion_rockfish'}, {'id': 5179, 'synset': 'red_rockfish.n.02', 'name': 'red_rockfish'}, {'id': 5180, 'synset': 'rosefish.n.02', 'name': 'rosefish'}, {'id': 
5181, 'synset': 'bullhead.n.01', 'name': 'bullhead'}, {'id': 5182, 'synset': "miller's-thumb.n.01", 'name': "miller's-thumb"}, {'id': 5183, 'synset': 'sea_raven.n.01', 'name': 'sea_raven'}, {'id': 5184, 'synset': 'lumpfish.n.01', 'name': 'lumpfish'}, {'id': 5185, 'synset': 'lumpsucker.n.01', 'name': 'lumpsucker'}, {'id': 5186, 'synset': 'pogge.n.01', 'name': 'pogge'}, {'id': 5187, 'synset': 'greenling.n.01', 'name': 'greenling'}, {'id': 5188, 'synset': 'kelp_greenling.n.01', 'name': 'kelp_greenling'}, {'id': 5189, 'synset': 'painted_greenling.n.01', 'name': 'painted_greenling'}, {'id': 5190, 'synset': 'flathead.n.01', 'name': 'flathead'}, {'id': 5191, 'synset': 'gurnard.n.01', 'name': 'gurnard'}, {'id': 5192, 'synset': 'tub_gurnard.n.01', 'name': 'tub_gurnard'}, {'id': 5193, 'synset': 'sea_robin.n.01', 'name': 'sea_robin'}, {'id': 5194, 'synset': 'northern_sea_robin.n.01', 'name': 'northern_sea_robin'}, {'id': 5195, 'synset': 'flying_gurnard.n.01', 'name': 'flying_gurnard'}, {'id': 5196, 'synset': 'plectognath.n.01', 'name': 'plectognath'}, {'id': 5197, 'synset': 'triggerfish.n.01', 'name': 'triggerfish'}, {'id': 5198, 'synset': 'queen_triggerfish.n.01', 'name': 'queen_triggerfish'}, {'id': 5199, 'synset': 'filefish.n.01', 'name': 'filefish'}, {'id': 5200, 'synset': 'leatherjacket.n.01', 'name': 'leatherjacket'}, {'id': 5201, 'synset': 'boxfish.n.01', 'name': 'boxfish'}, {'id': 5202, 'synset': 'cowfish.n.01', 'name': 'cowfish'}, {'id': 5203, 'synset': 'spiny_puffer.n.01', 'name': 'spiny_puffer'}, {'id': 5204, 'synset': 'porcupinefish.n.01', 'name': 'porcupinefish'}, {'id': 5205, 'synset': 'balloonfish.n.01', 'name': 'balloonfish'}, {'id': 5206, 'synset': 'burrfish.n.01', 'name': 'burrfish'}, {'id': 5207, 'synset': 'ocean_sunfish.n.01', 'name': 'ocean_sunfish'}, {'id': 5208, 'synset': 'sharptail_mola.n.01', 'name': 'sharptail_mola'}, {'id': 5209, 'synset': 'flatfish.n.02', 'name': 'flatfish'}, {'id': 5210, 'synset': 'flounder.n.02', 'name': 'flounder'}, {'id': 5211, 'synset': 'righteye_flounder.n.01', 'name': 'righteye_flounder'}, {'id': 5212, 'synset': 'plaice.n.02', 'name': 'plaice'}, {'id': 5213, 'synset': 'european_flatfish.n.01', 'name': 'European_flatfish'}, {'id': 5214, 'synset': 'yellowtail_flounder.n.02', 'name': 'yellowtail_flounder'}, {'id': 5215, 'synset': 'winter_flounder.n.02', 'name': 'winter_flounder'}, {'id': 5216, 'synset': 'lemon_sole.n.05', 'name': 'lemon_sole'}, {'id': 5217, 'synset': 'american_plaice.n.01', 'name': 'American_plaice'}, {'id': 5218, 'synset': 'halibut.n.02', 'name': 'halibut'}, {'id': 5219, 'synset': 'atlantic_halibut.n.01', 'name': 'Atlantic_halibut'}, {'id': 5220, 'synset': 'pacific_halibut.n.01', 'name': 'Pacific_halibut'}, {'id': 5221, 'synset': 'lefteye_flounder.n.01', 'name': 'lefteye_flounder'}, {'id': 5222, 'synset': 'southern_flounder.n.01', 'name': 'southern_flounder'}, {'id': 5223, 'synset': 'summer_flounder.n.01', 'name': 'summer_flounder'}, {'id': 5224, 'synset': 'whiff.n.02', 'name': 'whiff'}, {'id': 5225, 'synset': 'horned_whiff.n.01', 'name': 'horned_whiff'}, {'id': 5226, 'synset': 'sand_dab.n.02', 'name': 'sand_dab'}, {'id': 5227, 'synset': 'windowpane.n.02', 'name': 'windowpane'}, {'id': 5228, 'synset': 'brill.n.01', 'name': 'brill'}, {'id': 5229, 'synset': 'turbot.n.02', 'name': 'turbot'}, {'id': 5230, 'synset': 'tonguefish.n.01', 'name': 'tonguefish'}, {'id': 5231, 'synset': 'sole.n.04', 'name': 'sole'}, {'id': 5232, 'synset': 'european_sole.n.01', 'name': 'European_sole'}, {'id': 5233, 'synset': 'english_sole.n.02', 'name': 
'English_sole'}, {'id': 5234, 'synset': 'hogchoker.n.01', 'name': 'hogchoker'}, {'id': 5235, 'synset': 'aba.n.02', 'name': 'aba'}, {'id': 5236, 'synset': 'abacus.n.02', 'name': 'abacus'}, {'id': 5237, 'synset': 'abandoned_ship.n.01', 'name': 'abandoned_ship'}, {'id': 5238, 'synset': 'a_battery.n.01', 'name': 'A_battery'}, {'id': 5239, 'synset': 'abattoir.n.01', 'name': 'abattoir'}, {'id': 5240, 'synset': 'abaya.n.01', 'name': 'abaya'}, {'id': 5241, 'synset': 'abbe_condenser.n.01', 'name': 'Abbe_condenser'}, {'id': 5242, 'synset': 'abbey.n.03', 'name': 'abbey'}, {'id': 5243, 'synset': 'abbey.n.02', 'name': 'abbey'}, {'id': 5244, 'synset': 'abbey.n.01', 'name': 'abbey'}, {'id': 5245, 'synset': 'abney_level.n.01', 'name': 'Abney_level'}, {'id': 5246, 'synset': 'abrader.n.01', 'name': 'abrader'}, {'id': 5247, 'synset': 'abrading_stone.n.01', 'name': 'abrading_stone'}, {'id': 5248, 'synset': 'abutment.n.02', 'name': 'abutment'}, {'id': 5249, 'synset': 'abutment_arch.n.01', 'name': 'abutment_arch'}, {'id': 5250, 'synset': 'academic_costume.n.01', 'name': 'academic_costume'}, {'id': 5251, 'synset': 'academic_gown.n.01', 'name': 'academic_gown'}, {'id': 5252, 'synset': 'accelerator.n.02', 'name': 'accelerator'}, {'id': 5253, 'synset': 'accelerator.n.04', 'name': 'accelerator'}, {'id': 5254, 'synset': 'accelerator.n.01', 'name': 'accelerator'}, {'id': 5255, 'synset': 'accelerometer.n.01', 'name': 'accelerometer'}, {'id': 5256, 'synset': 'accessory.n.01', 'name': 'accessory'}, {'id': 5257, 'synset': 'accommodating_lens_implant.n.01', 'name': 'accommodating_lens_implant'}, {'id': 5258, 'synset': 'accommodation.n.04', 'name': 'accommodation'}, {'id': 5259, 'synset': 'accordion.n.01', 'name': 'accordion'}, {'id': 5260, 'synset': 'acetate_disk.n.01', 'name': 'acetate_disk'}, {'id': 5261, 'synset': 'acetate_rayon.n.01', 'name': 'acetate_rayon'}, {'id': 5262, 'synset': 'achromatic_lens.n.01', 'name': 'achromatic_lens'}, {'id': 5263, 'synset': 'acoustic_delay_line.n.01', 'name': 'acoustic_delay_line'}, {'id': 5264, 'synset': 'acoustic_device.n.01', 'name': 'acoustic_device'}, {'id': 5265, 'synset': 'acoustic_guitar.n.01', 'name': 'acoustic_guitar'}, {'id': 5266, 'synset': 'acoustic_modem.n.01', 'name': 'acoustic_modem'}, {'id': 5267, 'synset': 'acropolis.n.01', 'name': 'acropolis'}, {'id': 5268, 'synset': 'acrylic.n.04', 'name': 'acrylic'}, {'id': 5269, 'synset': 'acrylic.n.03', 'name': 'acrylic'}, {'id': 5270, 'synset': 'actinometer.n.01', 'name': 'actinometer'}, {'id': 5271, 'synset': 'action.n.07', 'name': 'action'}, {'id': 5272, 'synset': 'active_matrix_screen.n.01', 'name': 'active_matrix_screen'}, {'id': 5273, 'synset': 'actuator.n.01', 'name': 'actuator'}, {'id': 5274, 'synset': 'adapter.n.02', 'name': 'adapter'}, {'id': 5275, 'synset': 'adder.n.02', 'name': 'adder'}, {'id': 5276, 'synset': 'adding_machine.n.01', 'name': 'adding_machine'}, {'id': 5277, 'synset': 'addressing_machine.n.01', 'name': 'addressing_machine'}, {'id': 5278, 'synset': 'adhesive_bandage.n.01', 'name': 'adhesive_bandage'}, {'id': 5279, 'synset': 'adit.n.01', 'name': 'adit'}, {'id': 5280, 'synset': 'adjoining_room.n.01', 'name': 'adjoining_room'}, {'id': 5281, 'synset': 'adjustable_wrench.n.01', 'name': 'adjustable_wrench'}, {'id': 5282, 'synset': 'adobe.n.02', 'name': 'adobe'}, {'id': 5283, 'synset': 'adz.n.01', 'name': 'adz'}, {'id': 5284, 'synset': 'aeolian_harp.n.01', 'name': 'aeolian_harp'}, {'id': 5285, 'synset': 'aerator.n.01', 'name': 'aerator'}, {'id': 5286, 'synset': 'aerial_torpedo.n.01', 'name': 'aerial_torpedo'}, 
{'id': 5287, 'synset': 'aertex.n.01', 'name': 'Aertex'}, {'id': 5288, 'synset': 'afghan.n.01', 'name': 'afghan'}, {'id': 5289, 'synset': 'afro-wig.n.01', 'name': 'Afro-wig'}, {'id': 5290, 'synset': 'afterburner.n.01', 'name': 'afterburner'}, {'id': 5291, 'synset': 'after-shave.n.01', 'name': 'after-shave'}, {'id': 5292, 'synset': 'agateware.n.01', 'name': 'agateware'}, {'id': 5293, 'synset': 'agglomerator.n.01', 'name': 'agglomerator'}, {'id': 5294, 'synset': 'aglet.n.02', 'name': 'aglet'}, {'id': 5295, 'synset': 'aglet.n.01', 'name': 'aglet'}, {'id': 5296, 'synset': 'agora.n.03', 'name': 'agora'}, {'id': 5297, 'synset': 'aigrette.n.01', 'name': 'aigrette'}, {'id': 5298, 'synset': 'aileron.n.01', 'name': 'aileron'}, {'id': 5299, 'synset': 'air_bag.n.01', 'name': 'air_bag'}, {'id': 5300, 'synset': 'airbrake.n.02', 'name': 'airbrake'}, {'id': 5301, 'synset': 'airbrush.n.01', 'name': 'airbrush'}, {'id': 5302, 'synset': 'airbus.n.01', 'name': 'airbus'}, {'id': 5303, 'synset': 'air_compressor.n.01', 'name': 'air_compressor'}, {'id': 5304, 'synset': 'aircraft.n.01', 'name': 'aircraft'}, {'id': 5305, 'synset': 'aircraft_carrier.n.01', 'name': 'aircraft_carrier'}, {'id': 5306, 'synset': 'aircraft_engine.n.01', 'name': 'aircraft_engine'}, {'id': 5307, 'synset': 'air_cushion.n.02', 'name': 'air_cushion'}, {'id': 5308, 'synset': 'airdock.n.01', 'name': 'airdock'}, {'id': 5309, 'synset': 'airfield.n.01', 'name': 'airfield'}, {'id': 5310, 'synset': 'air_filter.n.01', 'name': 'air_filter'}, {'id': 5311, 'synset': 'airfoil.n.01', 'name': 'airfoil'}, {'id': 5312, 'synset': 'airframe.n.01', 'name': 'airframe'}, {'id': 5313, 'synset': 'air_gun.n.01', 'name': 'air_gun'}, {'id': 5314, 'synset': 'air_hammer.n.01', 'name': 'air_hammer'}, {'id': 5315, 'synset': 'air_horn.n.01', 'name': 'air_horn'}, {'id': 5316, 'synset': 'airing_cupboard.n.01', 'name': 'airing_cupboard'}, {'id': 5317, 'synset': 'airliner.n.01', 'name': 'airliner'}, {'id': 5318, 'synset': 'airmailer.n.01', 'name': 'airmailer'}, {'id': 5319, 'synset': 'airplane_propeller.n.01', 'name': 'airplane_propeller'}, {'id': 5320, 'synset': 'airport.n.01', 'name': 'airport'}, {'id': 5321, 'synset': 'air_pump.n.01', 'name': 'air_pump'}, {'id': 5322, 'synset': 'air_search_radar.n.01', 'name': 'air_search_radar'}, {'id': 5323, 'synset': 'airship.n.01', 'name': 'airship'}, {'id': 5324, 'synset': 'air_terminal.n.01', 'name': 'air_terminal'}, {'id': 5325, 'synset': 'air-to-air_missile.n.01', 'name': 'air-to-air_missile'}, {'id': 5326, 'synset': 'air-to-ground_missile.n.01', 'name': 'air-to-ground_missile'}, {'id': 5327, 'synset': 'aisle.n.03', 'name': 'aisle'}, {'id': 5328, 'synset': "aladdin's_lamp.n.01", 'name': "Aladdin's_lamp"}, {'id': 5329, 'synset': 'alarm.n.02', 'name': 'alarm'}, {'id': 5330, 'synset': 'alb.n.01', 'name': 'alb'}, {'id': 5331, 'synset': 'alcazar.n.01', 'name': 'alcazar'}, {'id': 5332, 'synset': 'alcohol_thermometer.n.01', 'name': 'alcohol_thermometer'}, {'id': 5333, 'synset': 'alehouse.n.01', 'name': 'alehouse'}, {'id': 5334, 'synset': 'alembic.n.01', 'name': 'alembic'}, {'id': 5335, 'synset': 'algometer.n.01', 'name': 'algometer'}, {'id': 5336, 'synset': 'alidade.n.02', 'name': 'alidade'}, {'id': 5337, 'synset': 'alidade.n.01', 'name': 'alidade'}, {'id': 5338, 'synset': 'a-line.n.01', 'name': 'A-line'}, {'id': 5339, 'synset': 'allen_screw.n.01', 'name': 'Allen_screw'}, {'id': 5340, 'synset': 'allen_wrench.n.01', 'name': 'Allen_wrench'}, {'id': 5341, 'synset': 'alligator_wrench.n.01', 'name': 'alligator_wrench'}, {'id': 5342, 'synset': 
'alms_dish.n.01', 'name': 'alms_dish'}, {'id': 5343, 'synset': 'alpaca.n.02', 'name': 'alpaca'}, {'id': 5344, 'synset': 'alpenstock.n.01', 'name': 'alpenstock'}, {'id': 5345, 'synset': 'altar.n.02', 'name': 'altar'}, {'id': 5346, 'synset': 'altar.n.01', 'name': 'altar'}, {'id': 5347, 'synset': 'altarpiece.n.01', 'name': 'altarpiece'}, {'id': 5348, 'synset': 'altazimuth.n.01', 'name': 'altazimuth'}, {'id': 5349, 'synset': 'alternator.n.01', 'name': 'alternator'}, {'id': 5350, 'synset': 'altimeter.n.01', 'name': 'altimeter'}, {'id': 5351, 'synset': 'amati.n.02', 'name': 'Amati'}, {'id': 5352, 'synset': 'amen_corner.n.01', 'name': 'amen_corner'}, {'id': 5353, 'synset': 'american_organ.n.01', 'name': 'American_organ'}, {'id': 5354, 'synset': 'ammeter.n.01', 'name': 'ammeter'}, {'id': 5355, 'synset': 'ammonia_clock.n.01', 'name': 'ammonia_clock'}, {'id': 5356, 'synset': 'ammunition.n.01', 'name': 'ammunition'}, {'id': 5357, 'synset': 'amphibian.n.02', 'name': 'amphibian'}, {'id': 5358, 'synset': 'amphibian.n.01', 'name': 'amphibian'}, {'id': 5359, 'synset': 'amphitheater.n.02', 'name': 'amphitheater'}, {'id': 5360, 'synset': 'amphitheater.n.01', 'name': 'amphitheater'}, {'id': 5361, 'synset': 'amphora.n.01', 'name': 'amphora'}, {'id': 5362, 'synset': 'ampulla.n.02', 'name': 'ampulla'}, {'id': 5363, 'synset': 'amusement_arcade.n.01', 'name': 'amusement_arcade'}, {'id': 5364, 'synset': 'analog_clock.n.01', 'name': 'analog_clock'}, {'id': 5365, 'synset': 'analog_computer.n.01', 'name': 'analog_computer'}, {'id': 5366, 'synset': 'analog_watch.n.01', 'name': 'analog_watch'}, {'id': 5367, 'synset': 'analytical_balance.n.01', 'name': 'analytical_balance'}, {'id': 5368, 'synset': 'analyzer.n.01', 'name': 'analyzer'}, {'id': 5369, 'synset': 'anamorphosis.n.02', 'name': 'anamorphosis'}, {'id': 5370, 'synset': 'anastigmat.n.01', 'name': 'anastigmat'}, {'id': 5371, 'synset': 'anchor.n.01', 'name': 'anchor'}, {'id': 5372, 'synset': 'anchor_chain.n.01', 'name': 'anchor_chain'}, {'id': 5373, 'synset': 'anchor_light.n.01', 'name': 'anchor_light'}, {'id': 5374, 'synset': 'and_circuit.n.01', 'name': 'AND_circuit'}, {'id': 5375, 'synset': 'andiron.n.01', 'name': 'andiron'}, {'id': 5376, 'synset': 'android.n.01', 'name': 'android'}, {'id': 5377, 'synset': 'anechoic_chamber.n.01', 'name': 'anechoic_chamber'}, {'id': 5378, 'synset': 'anemometer.n.01', 'name': 'anemometer'}, {'id': 5379, 'synset': 'aneroid_barometer.n.01', 'name': 'aneroid_barometer'}, {'id': 5380, 'synset': 'angiocardiogram.n.01', 'name': 'angiocardiogram'}, {'id': 5381, 'synset': 'angioscope.n.01', 'name': 'angioscope'}, {'id': 5382, 'synset': 'angle_bracket.n.02', 'name': 'angle_bracket'}, {'id': 5383, 'synset': 'angledozer.n.01', 'name': 'angledozer'}, {'id': 5384, 'synset': 'ankle_brace.n.01', 'name': 'ankle_brace'}, {'id': 5385, 'synset': 'anklet.n.02', 'name': 'anklet'}, {'id': 5386, 'synset': 'anklet.n.01', 'name': 'anklet'}, {'id': 5387, 'synset': 'ankus.n.01', 'name': 'ankus'}, {'id': 5388, 'synset': 'anode.n.01', 'name': 'anode'}, {'id': 5389, 'synset': 'anode.n.02', 'name': 'anode'}, {'id': 5390, 'synset': 'answering_machine.n.01', 'name': 'answering_machine'}, {'id': 5391, 'synset': 'anteroom.n.01', 'name': 'anteroom'}, {'id': 5392, 'synset': 'antiaircraft.n.01', 'name': 'antiaircraft'}, {'id': 5393, 'synset': 'antiballistic_missile.n.01', 'name': 'antiballistic_missile'}, {'id': 5394, 'synset': 'antifouling_paint.n.01', 'name': 'antifouling_paint'}, {'id': 5395, 'synset': 'anti-g_suit.n.01', 'name': 'anti-G_suit'}, {'id': 5396, 
'synset': 'antimacassar.n.01', 'name': 'antimacassar'}, {'id': 5397, 'synset': 'antiperspirant.n.01', 'name': 'antiperspirant'}, {'id': 5398, 'synset': 'anti-submarine_rocket.n.01', 'name': 'anti-submarine_rocket'}, {'id': 5399, 'synset': 'anvil.n.01', 'name': 'anvil'}, {'id': 5400, 'synset': 'ao_dai.n.01', 'name': 'ao_dai'}, {'id': 5401, 'synset': 'apadana.n.01', 'name': 'apadana'}, {'id': 5402, 'synset': 'apartment.n.01', 'name': 'apartment'}, {'id': 5403, 'synset': 'apartment_building.n.01', 'name': 'apartment_building'}, {'id': 5404, 'synset': 'aperture.n.03', 'name': 'aperture'}, {'id': 5405, 'synset': 'aperture.n.01', 'name': 'aperture'}, {'id': 5406, 'synset': 'apiary.n.01', 'name': 'apiary'}, {'id': 5407, 'synset': 'apparatus.n.01', 'name': 'apparatus'}, {'id': 5408, 'synset': 'apparel.n.01', 'name': 'apparel'}, {'id': 5409, 'synset': 'applecart.n.02', 'name': 'applecart'}, {'id': 5410, 'synset': 'appliance.n.02', 'name': 'appliance'}, {'id': 5411, 'synset': 'appliance.n.01', 'name': 'appliance'}, {'id': 5412, 'synset': 'applicator.n.01', 'name': 'applicator'}, {'id': 5413, 'synset': 'appointment.n.03', 'name': 'appointment'}, {'id': 5414, 'synset': 'apron_string.n.01', 'name': 'apron_string'}, {'id': 5415, 'synset': 'apse.n.01', 'name': 'apse'}, {'id': 5416, 'synset': 'aqualung.n.01', 'name': 'aqualung'}, {'id': 5417, 'synset': 'aquaplane.n.01', 'name': 'aquaplane'}, {'id': 5418, 'synset': 'arabesque.n.02', 'name': 'arabesque'}, {'id': 5419, 'synset': 'arbor.n.03', 'name': 'arbor'}, {'id': 5420, 'synset': 'arcade.n.02', 'name': 'arcade'}, {'id': 5421, 'synset': 'arch.n.04', 'name': 'arch'}, {'id': 5422, 'synset': 'architecture.n.01', 'name': 'architecture'}, {'id': 5423, 'synset': 'architrave.n.02', 'name': 'architrave'}, {'id': 5424, 'synset': 'arch_support.n.01', 'name': 'arch_support'}, {'id': 5425, 'synset': 'arc_lamp.n.01', 'name': 'arc_lamp'}, {'id': 5426, 'synset': 'area.n.05', 'name': 'area'}, {'id': 5427, 'synset': 'areaway.n.01', 'name': 'areaway'}, {'id': 5428, 'synset': 'argyle.n.03', 'name': 'argyle'}, {'id': 5429, 'synset': 'ark.n.02', 'name': 'ark'}, {'id': 5430, 'synset': 'arm.n.04', 'name': 'arm'}, {'id': 5431, 'synset': 'armament.n.01', 'name': 'armament'}, {'id': 5432, 'synset': 'armature.n.01', 'name': 'armature'}, {'id': 5433, 'synset': 'armet.n.01', 'name': 'armet'}, {'id': 5434, 'synset': 'arm_guard.n.01', 'name': 'arm_guard'}, {'id': 5435, 'synset': 'armhole.n.01', 'name': 'armhole'}, {'id': 5436, 'synset': 'armilla.n.02', 'name': 'armilla'}, {'id': 5437, 'synset': 'armlet.n.01', 'name': 'armlet'}, {'id': 5438, 'synset': 'armored_car.n.02', 'name': 'armored_car'}, {'id': 5439, 'synset': 'armored_car.n.01', 'name': 'armored_car'}, {'id': 5440, 'synset': 'armored_personnel_carrier.n.01', 'name': 'armored_personnel_carrier'}, {'id': 5441, 'synset': 'armored_vehicle.n.01', 'name': 'armored_vehicle'}, {'id': 5442, 'synset': 'armor_plate.n.01', 'name': 'armor_plate'}, {'id': 5443, 'synset': 'armory.n.04', 'name': 'armory'}, {'id': 5444, 'synset': 'armrest.n.01', 'name': 'armrest'}, {'id': 5445, 'synset': 'arquebus.n.01', 'name': 'arquebus'}, {'id': 5446, 'synset': 'array.n.04', 'name': 'array'}, {'id': 5447, 'synset': 'array.n.03', 'name': 'array'}, {'id': 5448, 'synset': 'arrester.n.01', 'name': 'arrester'}, {'id': 5449, 'synset': 'arrow.n.02', 'name': 'arrow'}, {'id': 5450, 'synset': 'arsenal.n.01', 'name': 'arsenal'}, {'id': 5451, 'synset': 'arterial_road.n.01', 'name': 'arterial_road'}, {'id': 5452, 'synset': 'arthrogram.n.01', 'name': 'arthrogram'}, {'id': 
5453, 'synset': 'arthroscope.n.01', 'name': 'arthroscope'}, {'id': 5454, 'synset': 'artificial_heart.n.01', 'name': 'artificial_heart'}, {'id': 5455, 'synset': 'artificial_horizon.n.01', 'name': 'artificial_horizon'}, {'id': 5456, 'synset': 'artificial_joint.n.01', 'name': 'artificial_joint'}, {'id': 5457, 'synset': 'artificial_kidney.n.01', 'name': 'artificial_kidney'}, {'id': 5458, 'synset': 'artificial_skin.n.01', 'name': 'artificial_skin'}, {'id': 5459, 'synset': 'artillery.n.01', 'name': 'artillery'}, {'id': 5460, 'synset': 'artillery_shell.n.01', 'name': 'artillery_shell'}, {'id': 5461, 'synset': "artist's_loft.n.01", 'name': "artist's_loft"}, {'id': 5462, 'synset': 'art_school.n.01', 'name': 'art_school'}, {'id': 5463, 'synset': 'ascot.n.01', 'name': 'ascot'}, {'id': 5464, 'synset': 'ash-pan.n.01', 'name': 'ash-pan'}, {'id': 5465, 'synset': 'aspergill.n.01', 'name': 'aspergill'}, {'id': 5466, 'synset': 'aspersorium.n.01', 'name': 'aspersorium'}, {'id': 5467, 'synset': 'aspirator.n.01', 'name': 'aspirator'}, {'id': 5468, 'synset': 'aspirin_powder.n.01', 'name': 'aspirin_powder'}, {'id': 5469, 'synset': 'assault_gun.n.02', 'name': 'assault_gun'}, {'id': 5470, 'synset': 'assault_rifle.n.01', 'name': 'assault_rifle'}, {'id': 5471, 'synset': 'assegai.n.01', 'name': 'assegai'}, {'id': 5472, 'synset': 'assembly.n.01', 'name': 'assembly'}, {'id': 5473, 'synset': 'assembly.n.05', 'name': 'assembly'}, {'id': 5474, 'synset': 'assembly_hall.n.01', 'name': 'assembly_hall'}, {'id': 5475, 'synset': 'assembly_plant.n.01', 'name': 'assembly_plant'}, {'id': 5476, 'synset': 'astatic_coils.n.01', 'name': 'astatic_coils'}, {'id': 5477, 'synset': 'astatic_galvanometer.n.01', 'name': 'astatic_galvanometer'}, {'id': 5478, 'synset': 'astrodome.n.01', 'name': 'astrodome'}, {'id': 5479, 'synset': 'astrolabe.n.01', 'name': 'astrolabe'}, {'id': 5480, 'synset': 'astronomical_telescope.n.01', 'name': 'astronomical_telescope'}, {'id': 5481, 'synset': 'astronomy_satellite.n.01', 'name': 'astronomy_satellite'}, {'id': 5482, 'synset': 'athenaeum.n.02', 'name': 'athenaeum'}, {'id': 5483, 'synset': 'athletic_sock.n.01', 'name': 'athletic_sock'}, {'id': 5484, 'synset': 'athletic_supporter.n.01', 'name': 'athletic_supporter'}, {'id': 5485, 'synset': 'atlas.n.04', 'name': 'atlas'}, {'id': 5486, 'synset': 'atmometer.n.01', 'name': 'atmometer'}, {'id': 5487, 'synset': 'atom_bomb.n.01', 'name': 'atom_bomb'}, {'id': 5488, 'synset': 'atomic_clock.n.01', 'name': 'atomic_clock'}, {'id': 5489, 'synset': 'atomic_pile.n.01', 'name': 'atomic_pile'}, {'id': 5490, 'synset': 'atrium.n.02', 'name': 'atrium'}, {'id': 5491, 'synset': 'attache_case.n.01', 'name': 'attache_case'}, {'id': 5492, 'synset': 'attachment.n.04', 'name': 'attachment'}, {'id': 5493, 'synset': 'attack_submarine.n.01', 'name': 'attack_submarine'}, {'id': 5494, 'synset': 'attenuator.n.01', 'name': 'attenuator'}, {'id': 5495, 'synset': 'attic.n.04', 'name': 'attic'}, {'id': 5496, 'synset': 'attic_fan.n.01', 'name': 'attic_fan'}, {'id': 5497, 'synset': 'attire.n.01', 'name': 'attire'}, {'id': 5498, 'synset': 'audio_amplifier.n.01', 'name': 'audio_amplifier'}, {'id': 5499, 'synset': 'audiocassette.n.01', 'name': 'audiocassette'}, {'id': 5500, 'synset': 'audio_cd.n.01', 'name': 'audio_CD'}, {'id': 5501, 'synset': 'audiometer.n.01', 'name': 'audiometer'}, {'id': 5502, 'synset': 'audio_system.n.01', 'name': 'audio_system'}, {'id': 5503, 'synset': 'audiotape.n.02', 'name': 'audiotape'}, {'id': 5504, 'synset': 'audiotape.n.01', 'name': 'audiotape'}, {'id': 5505, 'synset': 
'audiovisual.n.01', 'name': 'audiovisual'}, {'id': 5506, 'synset': 'auditorium.n.01', 'name': 'auditorium'}, {'id': 5507, 'synset': 'auger.n.02', 'name': 'auger'}, {'id': 5508, 'synset': 'autobahn.n.01', 'name': 'autobahn'}, {'id': 5509, 'synset': 'autoclave.n.01', 'name': 'autoclave'}, {'id': 5510, 'synset': 'autofocus.n.01', 'name': 'autofocus'}, {'id': 5511, 'synset': 'autogiro.n.01', 'name': 'autogiro'}, {'id': 5512, 'synset': 'autoinjector.n.01', 'name': 'autoinjector'}, {'id': 5513, 'synset': 'autoloader.n.01', 'name': 'autoloader'}, {'id': 5514, 'synset': 'automat.n.02', 'name': 'automat'}, {'id': 5515, 'synset': 'automat.n.01', 'name': 'automat'}, {'id': 5516, 'synset': 'automatic_choke.n.01', 'name': 'automatic_choke'}, {'id': 5517, 'synset': 'automatic_firearm.n.01', 'name': 'automatic_firearm'}, {'id': 5518, 'synset': 'automatic_pistol.n.01', 'name': 'automatic_pistol'}, {'id': 5519, 'synset': 'automatic_rifle.n.01', 'name': 'automatic_rifle'}, {'id': 5520, 'synset': 'automatic_transmission.n.01', 'name': 'automatic_transmission'}, {'id': 5521, 'synset': 'automation.n.03', 'name': 'automation'}, {'id': 5522, 'synset': 'automaton.n.02', 'name': 'automaton'}, {'id': 5523, 'synset': 'automobile_engine.n.01', 'name': 'automobile_engine'}, {'id': 5524, 'synset': 'automobile_factory.n.01', 'name': 'automobile_factory'}, {'id': 5525, 'synset': 'automobile_horn.n.01', 'name': 'automobile_horn'}, {'id': 5526, 'synset': 'autopilot.n.02', 'name': 'autopilot'}, {'id': 5527, 'synset': 'autoradiograph.n.01', 'name': 'autoradiograph'}, {'id': 5528, 'synset': 'autostrada.n.01', 'name': 'autostrada'}, {'id': 5529, 'synset': 'auxiliary_boiler.n.01', 'name': 'auxiliary_boiler'}, {'id': 5530, 'synset': 'auxiliary_engine.n.01', 'name': 'auxiliary_engine'}, {'id': 5531, 'synset': 'auxiliary_pump.n.01', 'name': 'auxiliary_pump'}, {'id': 5532, 'synset': 'auxiliary_research_submarine.n.01', 'name': 'auxiliary_research_submarine'}, {'id': 5533, 'synset': 'auxiliary_storage.n.01', 'name': 'auxiliary_storage'}, {'id': 5534, 'synset': 'aviary.n.01', 'name': 'aviary'}, {'id': 5535, 'synset': 'awl.n.01', 'name': 'awl'}, {'id': 5536, 'synset': 'ax_handle.n.01', 'name': 'ax_handle'}, {'id': 5537, 'synset': 'ax_head.n.01', 'name': 'ax_head'}, {'id': 5538, 'synset': 'axis.n.06', 'name': 'axis'}, {'id': 5539, 'synset': 'axle.n.01', 'name': 'axle'}, {'id': 5540, 'synset': 'axle_bar.n.01', 'name': 'axle_bar'}, {'id': 5541, 'synset': 'axletree.n.01', 'name': 'axletree'}, {'id': 5542, 'synset': 'babushka.n.01', 'name': 'babushka'}, {'id': 5543, 'synset': 'baby_bed.n.01', 'name': 'baby_bed'}, {'id': 5544, 'synset': 'baby_grand.n.01', 'name': 'baby_grand'}, {'id': 5545, 'synset': 'baby_powder.n.01', 'name': 'baby_powder'}, {'id': 5546, 'synset': 'baby_shoe.n.01', 'name': 'baby_shoe'}, {'id': 5547, 'synset': 'back.n.08', 'name': 'back'}, {'id': 5548, 'synset': 'back.n.07', 'name': 'back'}, {'id': 5549, 'synset': 'backbench.n.01', 'name': 'backbench'}, {'id': 5550, 'synset': 'backboard.n.02', 'name': 'backboard'}, {'id': 5551, 'synset': 'backbone.n.05', 'name': 'backbone'}, {'id': 5552, 'synset': 'back_brace.n.01', 'name': 'back_brace'}, {'id': 5553, 'synset': 'backgammon_board.n.01', 'name': 'backgammon_board'}, {'id': 5554, 'synset': 'background.n.07', 'name': 'background'}, {'id': 5555, 'synset': 'backhoe.n.01', 'name': 'backhoe'}, {'id': 5556, 'synset': 'backlighting.n.01', 'name': 'backlighting'}, {'id': 5557, 'synset': 'backpacking_tent.n.01', 'name': 'backpacking_tent'}, {'id': 5558, 'synset': 'backplate.n.01', 
'name': 'backplate'}, {'id': 5559, 'synset': 'back_porch.n.01', 'name': 'back_porch'}, {'id': 5560, 'synset': 'backsaw.n.01', 'name': 'backsaw'}, {'id': 5561, 'synset': 'backscratcher.n.02', 'name': 'backscratcher'}, {'id': 5562, 'synset': 'backseat.n.02', 'name': 'backseat'}, {'id': 5563, 'synset': 'backspace_key.n.01', 'name': 'backspace_key'}, {'id': 5564, 'synset': 'backstairs.n.01', 'name': 'backstairs'}, {'id': 5565, 'synset': 'backstay.n.01', 'name': 'backstay'}, {'id': 5566, 'synset': 'backstop.n.02', 'name': 'backstop'}, {'id': 5567, 'synset': 'backsword.n.02', 'name': 'backsword'}, {'id': 5568, 'synset': 'backup_system.n.01', 'name': 'backup_system'}, {'id': 5569, 'synset': 'badminton_court.n.01', 'name': 'badminton_court'}, {'id': 5570, 'synset': 'badminton_equipment.n.01', 'name': 'badminton_equipment'}, {'id': 5571, 'synset': 'badminton_racket.n.01', 'name': 'badminton_racket'}, {'id': 5572, 'synset': 'bag.n.01', 'name': 'bag'}, {'id': 5573, 'synset': 'baggage.n.01', 'name': 'baggage'}, {'id': 5574, 'synset': 'baggage.n.03', 'name': 'baggage'}, {'id': 5575, 'synset': 'baggage_car.n.01', 'name': 'baggage_car'}, {'id': 5576, 'synset': 'baggage_claim.n.01', 'name': 'baggage_claim'}, {'id': 5577, 'synset': 'bailey.n.04', 'name': 'bailey'}, {'id': 5578, 'synset': 'bailey.n.03', 'name': 'bailey'}, {'id': 5579, 'synset': 'bailey_bridge.n.01', 'name': 'Bailey_bridge'}, {'id': 5580, 'synset': 'bain-marie.n.01', 'name': 'bain-marie'}, {'id': 5581, 'synset': 'baize.n.01', 'name': 'baize'}, {'id': 5582, 'synset': 'bakery.n.01', 'name': 'bakery'}, {'id': 5583, 'synset': 'balaclava.n.01', 'name': 'balaclava'}, {'id': 5584, 'synset': 'balalaika.n.01', 'name': 'balalaika'}, {'id': 5585, 'synset': 'balance.n.12', 'name': 'balance'}, {'id': 5586, 'synset': 'balance_beam.n.01', 'name': 'balance_beam'}, {'id': 5587, 'synset': 'balance_wheel.n.01', 'name': 'balance_wheel'}, {'id': 5588, 'synset': 'balbriggan.n.01', 'name': 'balbriggan'}, {'id': 5589, 'synset': 'balcony.n.02', 'name': 'balcony'}, {'id': 5590, 'synset': 'balcony.n.01', 'name': 'balcony'}, {'id': 5591, 'synset': 'baldachin.n.01', 'name': 'baldachin'}, {'id': 5592, 'synset': 'baldric.n.01', 'name': 'baldric'}, {'id': 5593, 'synset': 'bale.n.01', 'name': 'bale'}, {'id': 5594, 'synset': 'baling_wire.n.01', 'name': 'baling_wire'}, {'id': 5595, 'synset': 'ball.n.01', 'name': 'ball'}, {'id': 5596, 'synset': 'ball_and_chain.n.01', 'name': 'ball_and_chain'}, {'id': 5597, 'synset': 'ball-and-socket_joint.n.02', 'name': 'ball-and-socket_joint'}, {'id': 5598, 'synset': 'ballast.n.05', 'name': 'ballast'}, {'id': 5599, 'synset': 'ball_bearing.n.01', 'name': 'ball_bearing'}, {'id': 5600, 'synset': 'ball_cartridge.n.01', 'name': 'ball_cartridge'}, {'id': 5601, 'synset': 'ballcock.n.01', 'name': 'ballcock'}, {'id': 5602, 'synset': 'balldress.n.01', 'name': 'balldress'}, {'id': 5603, 'synset': 'ball_gown.n.01', 'name': 'ball_gown'}, {'id': 5604, 'synset': 'ballistic_galvanometer.n.01', 'name': 'ballistic_galvanometer'}, {'id': 5605, 'synset': 'ballistic_missile.n.01', 'name': 'ballistic_missile'}, {'id': 5606, 'synset': 'ballistic_pendulum.n.01', 'name': 'ballistic_pendulum'}, {'id': 5607, 'synset': 'ballistocardiograph.n.01', 'name': 'ballistocardiograph'}, {'id': 5608, 'synset': 'balloon_bomb.n.01', 'name': 'balloon_bomb'}, {'id': 5609, 'synset': 'balloon_sail.n.01', 'name': 'balloon_sail'}, {'id': 5610, 'synset': 'ballot_box.n.01', 'name': 'ballot_box'}, {'id': 5611, 'synset': 'ballpark.n.01', 'name': 'ballpark'}, {'id': 5612, 'synset': 
'ball-peen_hammer.n.01', 'name': 'ball-peen_hammer'}, {'id': 5613, 'synset': 'ballpoint.n.01', 'name': 'ballpoint'}, {'id': 5614, 'synset': 'ballroom.n.01', 'name': 'ballroom'}, {'id': 5615, 'synset': 'ball_valve.n.01', 'name': 'ball_valve'}, {'id': 5616, 'synset': 'balsa_raft.n.01', 'name': 'balsa_raft'}, {'id': 5617, 'synset': 'baluster.n.01', 'name': 'baluster'}, {'id': 5618, 'synset': 'banana_boat.n.01', 'name': 'banana_boat'}, {'id': 5619, 'synset': 'band.n.13', 'name': 'band'}, {'id': 5620, 'synset': 'bandbox.n.01', 'name': 'bandbox'}, {'id': 5621, 'synset': 'banderilla.n.01', 'name': 'banderilla'}, {'id': 5622, 'synset': 'bandoleer.n.01', 'name': 'bandoleer'}, {'id': 5623, 'synset': 'bandoneon.n.01', 'name': 'bandoneon'}, {'id': 5624, 'synset': 'bandsaw.n.01', 'name': 'bandsaw'}, {'id': 5625, 'synset': 'bandwagon.n.02', 'name': 'bandwagon'}, {'id': 5626, 'synset': 'bangalore_torpedo.n.01', 'name': 'bangalore_torpedo'}, {'id': 5627, 'synset': 'bangle.n.02', 'name': 'bangle'}, {'id': 5628, 'synset': 'bannister.n.02', 'name': 'bannister'}, {'id': 5629, 'synset': 'banquette.n.01', 'name': 'banquette'}, {'id': 5630, 'synset': 'banyan.n.02', 'name': 'banyan'}, {'id': 5631, 'synset': 'baptismal_font.n.01', 'name': 'baptismal_font'}, {'id': 5632, 'synset': 'bar.n.03', 'name': 'bar'}, {'id': 5633, 'synset': 'bar.n.02', 'name': 'bar'}, {'id': 5634, 'synset': 'barbecue.n.03', 'name': 'barbecue'}, {'id': 5635, 'synset': 'barbed_wire.n.01', 'name': 'barbed_wire'}, {'id': 5636, 'synset': 'barber_chair.n.01', 'name': 'barber_chair'}, {'id': 5637, 'synset': 'barbershop.n.01', 'name': 'barbershop'}, {'id': 5638, 'synset': 'barbette_carriage.n.01', 'name': 'barbette_carriage'}, {'id': 5639, 'synset': 'barbican.n.01', 'name': 'barbican'}, {'id': 5640, 'synset': 'bar_bit.n.01', 'name': 'bar_bit'}, {'id': 5641, 'synset': 'bareboat.n.01', 'name': 'bareboat'}, {'id': 5642, 'synset': 'barge_pole.n.01', 'name': 'barge_pole'}, {'id': 5643, 'synset': 'baritone.n.03', 'name': 'baritone'}, {'id': 5644, 'synset': 'bark.n.03', 'name': 'bark'}, {'id': 5645, 'synset': 'bar_magnet.n.01', 'name': 'bar_magnet'}, {'id': 5646, 'synset': 'bar_mask.n.01', 'name': 'bar_mask'}, {'id': 5647, 'synset': 'barn.n.01', 'name': 'barn'}, {'id': 5648, 'synset': 'barndoor.n.01', 'name': 'barndoor'}, {'id': 5649, 'synset': 'barn_door.n.01', 'name': 'barn_door'}, {'id': 5650, 'synset': 'barnyard.n.01', 'name': 'barnyard'}, {'id': 5651, 'synset': 'barograph.n.01', 'name': 'barograph'}, {'id': 5652, 'synset': 'barometer.n.01', 'name': 'barometer'}, {'id': 5653, 'synset': 'barong.n.01', 'name': 'barong'}, {'id': 5654, 'synset': 'barouche.n.01', 'name': 'barouche'}, {'id': 5655, 'synset': 'bar_printer.n.01', 'name': 'bar_printer'}, {'id': 5656, 'synset': 'barrack.n.01', 'name': 'barrack'}, {'id': 5657, 'synset': 'barrage_balloon.n.01', 'name': 'barrage_balloon'}, {'id': 5658, 'synset': 'barrel.n.01', 'name': 'barrel'}, {'id': 5659, 'synset': 'barrelhouse.n.01', 'name': 'barrelhouse'}, {'id': 5660, 'synset': 'barrel_knot.n.01', 'name': 'barrel_knot'}, {'id': 5661, 'synset': 'barrel_organ.n.01', 'name': 'barrel_organ'}, {'id': 5662, 'synset': 'barrel_vault.n.01', 'name': 'barrel_vault'}, {'id': 5663, 'synset': 'barricade.n.02', 'name': 'barricade'}, {'id': 5664, 'synset': 'barrier.n.01', 'name': 'barrier'}, {'id': 5665, 'synset': 'barroom.n.01', 'name': 'barroom'}, {'id': 5666, 'synset': 'bascule.n.01', 'name': 'bascule'}, {'id': 5667, 'synset': 'base.n.08', 'name': 'base'}, {'id': 5668, 'synset': 'baseball_equipment.n.01', 'name': 
'baseball_equipment'}, {'id': 5669, 'synset': 'basement.n.01', 'name': 'basement'}, {'id': 5670, 'synset': 'basement.n.02', 'name': 'basement'}, {'id': 5671, 'synset': 'basic_point_defense_missile_system.n.01', 'name': 'basic_point_defense_missile_system'}, {'id': 5672, 'synset': 'basilica.n.02', 'name': 'basilica'}, {'id': 5673, 'synset': 'basilica.n.01', 'name': 'basilica'}, {'id': 5674, 'synset': 'basilisk.n.02', 'name': 'basilisk'}, {'id': 5675, 'synset': 'basin.n.01', 'name': 'basin'}, {'id': 5676, 'synset': 'basinet.n.01', 'name': 'basinet'}, {'id': 5677, 'synset': 'basket.n.03', 'name': 'basket'}, {'id': 5678, 'synset': 'basketball_court.n.01', 'name': 'basketball_court'}, {'id': 5679, 'synset': 'basketball_equipment.n.01', 'name': 'basketball_equipment'}, {'id': 5680, 'synset': 'basket_weave.n.01', 'name': 'basket_weave'}, {'id': 5681, 'synset': 'bass.n.07', 'name': 'bass'}, {'id': 5682, 'synset': 'bass_clarinet.n.01', 'name': 'bass_clarinet'}, {'id': 5683, 'synset': 'bass_drum.n.01', 'name': 'bass_drum'}, {'id': 5684, 'synset': 'basset_horn.n.01', 'name': 'basset_horn'}, {'id': 5685, 'synset': 'bass_fiddle.n.01', 'name': 'bass_fiddle'}, {'id': 5686, 'synset': 'bass_guitar.n.01', 'name': 'bass_guitar'}, {'id': 5687, 'synset': 'bassinet.n.01', 'name': 'bassinet'}, {'id': 5688, 'synset': 'bassinet.n.02', 'name': 'bassinet'}, {'id': 5689, 'synset': 'bassoon.n.01', 'name': 'bassoon'}, {'id': 5690, 'synset': 'baster.n.03', 'name': 'baster'}, {'id': 5691, 'synset': 'bastinado.n.01', 'name': 'bastinado'}, {'id': 5692, 'synset': 'bastion.n.03', 'name': 'bastion'}, {'id': 5693, 'synset': 'bastion.n.02', 'name': 'bastion'}, {'id': 5694, 'synset': 'bat.n.05', 'name': 'bat'}, {'id': 5695, 'synset': 'bath.n.01', 'name': 'bath'}, {'id': 5696, 'synset': 'bath_chair.n.01', 'name': 'bath_chair'}, {'id': 5697, 'synset': 'bathhouse.n.02', 'name': 'bathhouse'}, {'id': 5698, 'synset': 'bathhouse.n.01', 'name': 'bathhouse'}, {'id': 5699, 'synset': 'bathing_cap.n.01', 'name': 'bathing_cap'}, {'id': 5700, 'synset': 'bath_oil.n.01', 'name': 'bath_oil'}, {'id': 5701, 'synset': 'bathroom.n.01', 'name': 'bathroom'}, {'id': 5702, 'synset': 'bath_salts.n.01', 'name': 'bath_salts'}, {'id': 5703, 'synset': 'bathyscaphe.n.01', 'name': 'bathyscaphe'}, {'id': 5704, 'synset': 'bathysphere.n.01', 'name': 'bathysphere'}, {'id': 5705, 'synset': 'batik.n.01', 'name': 'batik'}, {'id': 5706, 'synset': 'batiste.n.01', 'name': 'batiste'}, {'id': 5707, 'synset': 'baton.n.01', 'name': 'baton'}, {'id': 5708, 'synset': 'baton.n.05', 'name': 'baton'}, {'id': 5709, 'synset': 'baton.n.04', 'name': 'baton'}, {'id': 5710, 'synset': 'baton.n.03', 'name': 'baton'}, {'id': 5711, 'synset': 'battering_ram.n.01', 'name': 'battering_ram'}, {'id': 5712, 'synset': "batter's_box.n.01", 'name': "batter's_box"}, {'id': 5713, 'synset': 'battery.n.05', 'name': 'battery'}, {'id': 5714, 'synset': 'batting_cage.n.01', 'name': 'batting_cage'}, {'id': 5715, 'synset': 'batting_glove.n.01', 'name': 'batting_glove'}, {'id': 5716, 'synset': 'batting_helmet.n.01', 'name': 'batting_helmet'}, {'id': 5717, 'synset': 'battle-ax.n.01', 'name': 'battle-ax'}, {'id': 5718, 'synset': 'battle_cruiser.n.01', 'name': 'battle_cruiser'}, {'id': 5719, 'synset': 'battle_dress.n.01', 'name': 'battle_dress'}, {'id': 5720, 'synset': 'battlement.n.01', 'name': 'battlement'}, {'id': 5721, 'synset': 'battleship.n.01', 'name': 'battleship'}, {'id': 5722, 'synset': 'battle_sight.n.01', 'name': 'battle_sight'}, {'id': 5723, 'synset': 'bay.n.05', 'name': 'bay'}, {'id': 5724, 
'synset': 'bay.n.04', 'name': 'bay'}, {'id': 5725, 'synset': 'bayonet.n.01', 'name': 'bayonet'}, {'id': 5726, 'synset': 'bay_rum.n.01', 'name': 'bay_rum'}, {'id': 5727, 'synset': 'bay_window.n.02', 'name': 'bay_window'}, {'id': 5728, 'synset': 'bazaar.n.01', 'name': 'bazaar'}, {'id': 5729, 'synset': 'bazaar.n.02', 'name': 'bazaar'}, {'id': 5730, 'synset': 'bazooka.n.01', 'name': 'bazooka'}, {'id': 5731, 'synset': 'b_battery.n.01', 'name': 'B_battery'}, {'id': 5732, 'synset': 'bb_gun.n.01', 'name': 'BB_gun'}, {'id': 5733, 'synset': 'beach_house.n.01', 'name': 'beach_house'}, {'id': 5734, 'synset': 'beach_towel.n.01', 'name': 'beach_towel'}, {'id': 5735, 'synset': 'beach_wagon.n.01', 'name': 'beach_wagon'}, {'id': 5736, 'synset': 'beachwear.n.01', 'name': 'beachwear'}, {'id': 5737, 'synset': 'beacon.n.03', 'name': 'beacon'}, {'id': 5738, 'synset': 'beading_plane.n.01', 'name': 'beading_plane'}, {'id': 5739, 'synset': 'beaker.n.02', 'name': 'beaker'}, {'id': 5740, 'synset': 'beaker.n.01', 'name': 'beaker'}, {'id': 5741, 'synset': 'beam.n.02', 'name': 'beam'}, {'id': 5742, 'synset': 'beam_balance.n.01', 'name': 'beam_balance'}, {'id': 5743, 'synset': 'bearing.n.06', 'name': 'bearing'}, {'id': 5744, 'synset': 'bearing_rein.n.01', 'name': 'bearing_rein'}, {'id': 5745, 'synset': 'bearing_wall.n.01', 'name': 'bearing_wall'}, {'id': 5746, 'synset': 'bearskin.n.02', 'name': 'bearskin'}, {'id': 5747, 'synset': 'beater.n.02', 'name': 'beater'}, {'id': 5748, 'synset': 'beating-reed_instrument.n.01', 'name': 'beating-reed_instrument'}, {'id': 5749, 'synset': 'beaver.n.06', 'name': 'beaver'}, {'id': 5750, 'synset': 'beaver.n.05', 'name': 'beaver'}, {'id': 5751, 'synset': 'beckman_thermometer.n.01', 'name': 'Beckman_thermometer'}, {'id': 5752, 'synset': 'bed.n.08', 'name': 'bed'}, {'id': 5753, 'synset': 'bed_and_breakfast.n.01', 'name': 'bed_and_breakfast'}, {'id': 5754, 'synset': 'bedclothes.n.01', 'name': 'bedclothes'}, {'id': 5755, 'synset': 'bedford_cord.n.01', 'name': 'Bedford_cord'}, {'id': 5756, 'synset': 'bed_jacket.n.01', 'name': 'bed_jacket'}, {'id': 5757, 'synset': 'bedpost.n.01', 'name': 'bedpost'}, {'id': 5758, 'synset': 'bedroll.n.01', 'name': 'bedroll'}, {'id': 5759, 'synset': 'bedroom.n.01', 'name': 'bedroom'}, {'id': 5760, 'synset': 'bedroom_furniture.n.01', 'name': 'bedroom_furniture'}, {'id': 5761, 'synset': 'bedsitting_room.n.01', 'name': 'bedsitting_room'}, {'id': 5762, 'synset': 'bedspring.n.01', 'name': 'bedspring'}, {'id': 5763, 'synset': 'bedstead.n.01', 'name': 'bedstead'}, {'id': 5764, 'synset': 'beefcake.n.01', 'name': 'beefcake'}, {'id': 5765, 'synset': 'beehive.n.04', 'name': 'beehive'}, {'id': 5766, 'synset': 'beer_barrel.n.01', 'name': 'beer_barrel'}, {'id': 5767, 'synset': 'beer_garden.n.01', 'name': 'beer_garden'}, {'id': 5768, 'synset': 'beer_glass.n.01', 'name': 'beer_glass'}, {'id': 5769, 'synset': 'beer_hall.n.01', 'name': 'beer_hall'}, {'id': 5770, 'synset': 'beer_mat.n.01', 'name': 'beer_mat'}, {'id': 5771, 'synset': 'beer_mug.n.01', 'name': 'beer_mug'}, {'id': 5772, 'synset': 'belaying_pin.n.01', 'name': 'belaying_pin'}, {'id': 5773, 'synset': 'belfry.n.02', 'name': 'belfry'}, {'id': 5774, 'synset': 'bell_arch.n.01', 'name': 'bell_arch'}, {'id': 5775, 'synset': 'bellarmine.n.02', 'name': 'bellarmine'}, {'id': 5776, 'synset': 'bellbottom_trousers.n.01', 'name': 'bellbottom_trousers'}, {'id': 5777, 'synset': 'bell_cote.n.01', 'name': 'bell_cote'}, {'id': 5778, 'synset': 'bell_foundry.n.01', 'name': 'bell_foundry'}, {'id': 5779, 'synset': 'bell_gable.n.01', 'name': 
'bell_gable'}, {'id': 5780, 'synset': 'bell_jar.n.01', 'name': 'bell_jar'}, {'id': 5781, 'synset': 'bellows.n.01', 'name': 'bellows'}, {'id': 5782, 'synset': 'bellpull.n.01', 'name': 'bellpull'}, {'id': 5783, 'synset': 'bell_push.n.01', 'name': 'bell_push'}, {'id': 5784, 'synset': 'bell_seat.n.01', 'name': 'bell_seat'}, {'id': 5785, 'synset': 'bell_tent.n.01', 'name': 'bell_tent'}, {'id': 5786, 'synset': 'bell_tower.n.01', 'name': 'bell_tower'}, {'id': 5787, 'synset': 'bellyband.n.01', 'name': 'bellyband'}, {'id': 5788, 'synset': 'belt.n.06', 'name': 'belt'}, {'id': 5789, 'synset': 'belting.n.01', 'name': 'belting'}, {'id': 5790, 'synset': 'bench_clamp.n.01', 'name': 'bench_clamp'}, {'id': 5791, 'synset': 'bench_hook.n.01', 'name': 'bench_hook'}, {'id': 5792, 'synset': 'bench_lathe.n.01', 'name': 'bench_lathe'}, {'id': 5793, 'synset': 'bench_press.n.02', 'name': 'bench_press'}, {'id': 5794, 'synset': 'bender.n.01', 'name': 'bender'}, {'id': 5795, 'synset': 'berlin.n.03', 'name': 'berlin'}, {'id': 5796, 'synset': 'bermuda_shorts.n.01', 'name': 'Bermuda_shorts'}, {'id': 5797, 'synset': 'berth.n.03', 'name': 'berth'}, {'id': 5798, 'synset': 'besom.n.01', 'name': 'besom'}, {'id': 5799, 'synset': 'bessemer_converter.n.01', 'name': 'Bessemer_converter'}, {'id': 5800, 'synset': 'bethel.n.01', 'name': 'bethel'}, {'id': 5801, 'synset': 'betting_shop.n.01', 'name': 'betting_shop'}, {'id': 5802, 'synset': 'bevatron.n.01', 'name': 'bevatron'}, {'id': 5803, 'synset': 'bevel.n.02', 'name': 'bevel'}, {'id': 5804, 'synset': 'bevel_gear.n.01', 'name': 'bevel_gear'}, {'id': 5805, 'synset': 'b-flat_clarinet.n.01', 'name': 'B-flat_clarinet'}, {'id': 5806, 'synset': 'bib.n.01', 'name': 'bib'}, {'id': 5807, 'synset': 'bib-and-tucker.n.01', 'name': 'bib-and-tucker'}, {'id': 5808, 'synset': 'bicorn.n.01', 'name': 'bicorn'}, {'id': 5809, 'synset': 'bicycle-built-for-two.n.01', 'name': 'bicycle-built-for-two'}, {'id': 5810, 'synset': 'bicycle_chain.n.01', 'name': 'bicycle_chain'}, {'id': 5811, 'synset': 'bicycle_clip.n.01', 'name': 'bicycle_clip'}, {'id': 5812, 'synset': 'bicycle_pump.n.01', 'name': 'bicycle_pump'}, {'id': 5813, 'synset': 'bicycle_rack.n.01', 'name': 'bicycle_rack'}, {'id': 5814, 'synset': 'bicycle_seat.n.01', 'name': 'bicycle_seat'}, {'id': 5815, 'synset': 'bicycle_wheel.n.01', 'name': 'bicycle_wheel'}, {'id': 5816, 'synset': 'bidet.n.01', 'name': 'bidet'}, {'id': 5817, 'synset': 'bier.n.02', 'name': 'bier'}, {'id': 5818, 'synset': 'bier.n.01', 'name': 'bier'}, {'id': 5819, 'synset': 'bi-fold_door.n.01', 'name': 'bi-fold_door'}, {'id': 5820, 'synset': 'bifocals.n.01', 'name': 'bifocals'}, {'id': 5821, 'synset': 'big_blue.n.01', 'name': 'Big_Blue'}, {'id': 5822, 'synset': 'big_board.n.02', 'name': 'big_board'}, {'id': 5823, 'synset': 'bight.n.04', 'name': 'bight'}, {'id': 5824, 'synset': 'bikini.n.02', 'name': 'bikini'}, {'id': 5825, 'synset': 'bikini_pants.n.01', 'name': 'bikini_pants'}, {'id': 5826, 'synset': 'bilge.n.02', 'name': 'bilge'}, {'id': 5827, 'synset': 'bilge_keel.n.01', 'name': 'bilge_keel'}, {'id': 5828, 'synset': 'bilge_pump.n.01', 'name': 'bilge_pump'}, {'id': 5829, 'synset': 'bilge_well.n.01', 'name': 'bilge_well'}, {'id': 5830, 'synset': 'bill.n.08', 'name': 'bill'}, {'id': 5831, 'synset': 'billiard_ball.n.01', 'name': 'billiard_ball'}, {'id': 5832, 'synset': 'billiard_room.n.01', 'name': 'billiard_room'}, {'id': 5833, 'synset': 'bin.n.01', 'name': 'bin'}, {'id': 5834, 'synset': 'binder.n.04', 'name': 'binder'}, {'id': 5835, 'synset': 'bindery.n.01', 'name': 'bindery'}, {'id': 
5836, 'synset': 'binding.n.05', 'name': 'binding'}, {'id': 5837, 'synset': 'bin_liner.n.01', 'name': 'bin_liner'}, {'id': 5838, 'synset': 'binnacle.n.01', 'name': 'binnacle'}, {'id': 5839, 'synset': 'binocular_microscope.n.01', 'name': 'binocular_microscope'}, {'id': 5840, 'synset': 'biochip.n.01', 'name': 'biochip'}, {'id': 5841, 'synset': 'biohazard_suit.n.01', 'name': 'biohazard_suit'}, {'id': 5842, 'synset': 'bioscope.n.02', 'name': 'bioscope'}, {'id': 5843, 'synset': 'biplane.n.01', 'name': 'biplane'}, {'id': 5844, 'synset': 'birch.n.03', 'name': 'birch'}, {'id': 5845, 'synset': 'birchbark_canoe.n.01', 'name': 'birchbark_canoe'}, {'id': 5846, 'synset': 'birdcall.n.02', 'name': 'birdcall'}, {'id': 5847, 'synset': 'bird_shot.n.01', 'name': 'bird_shot'}, {'id': 5848, 'synset': 'biretta.n.01', 'name': 'biretta'}, {'id': 5849, 'synset': 'bishop.n.03', 'name': 'bishop'}, {'id': 5850, 'synset': 'bistro.n.01', 'name': 'bistro'}, {'id': 5851, 'synset': 'bit.n.11', 'name': 'bit'}, {'id': 5852, 'synset': 'bit.n.05', 'name': 'bit'}, {'id': 5853, 'synset': 'bite_plate.n.01', 'name': 'bite_plate'}, {'id': 5854, 'synset': 'bitewing.n.01', 'name': 'bitewing'}, {'id': 5855, 'synset': 'bitumastic.n.01', 'name': 'bitumastic'}, {'id': 5856, 'synset': 'black.n.07', 'name': 'black'}, {'id': 5857, 'synset': 'black.n.06', 'name': 'black'}, {'id': 5858, 'synset': 'blackboard_eraser.n.01', 'name': 'blackboard_eraser'}, {'id': 5859, 'synset': 'black_box.n.01', 'name': 'black_box'}, {'id': 5860, 'synset': 'blackface.n.01', 'name': 'blackface'}, {'id': 5861, 'synset': 'blackjack.n.02', 'name': 'blackjack'}, {'id': 5862, 'synset': 'black_tie.n.02', 'name': 'black_tie'}, {'id': 5863, 'synset': 'blackwash.n.03', 'name': 'blackwash'}, {'id': 5864, 'synset': 'bladder.n.02', 'name': 'bladder'}, {'id': 5865, 'synset': 'blade.n.09', 'name': 'blade'}, {'id': 5866, 'synset': 'blade.n.08', 'name': 'blade'}, {'id': 5867, 'synset': 'blade.n.07', 'name': 'blade'}, {'id': 5868, 'synset': 'blank.n.04', 'name': 'blank'}, {'id': 5869, 'synset': 'blast_furnace.n.01', 'name': 'blast_furnace'}, {'id': 5870, 'synset': 'blasting_cap.n.01', 'name': 'blasting_cap'}, {'id': 5871, 'synset': 'blind.n.03', 'name': 'blind'}, {'id': 5872, 'synset': 'blind_curve.n.01', 'name': 'blind_curve'}, {'id': 5873, 'synset': 'blindfold.n.01', 'name': 'blindfold'}, {'id': 5874, 'synset': 'bling.n.01', 'name': 'bling'}, {'id': 5875, 'synset': 'blister_pack.n.01', 'name': 'blister_pack'}, {'id': 5876, 'synset': 'block.n.05', 'name': 'block'}, {'id': 5877, 'synset': 'blockade.n.02', 'name': 'blockade'}, {'id': 5878, 'synset': 'blockade-runner.n.01', 'name': 'blockade-runner'}, {'id': 5879, 'synset': 'block_and_tackle.n.01', 'name': 'block_and_tackle'}, {'id': 5880, 'synset': 'blockbuster.n.01', 'name': 'blockbuster'}, {'id': 5881, 'synset': 'blockhouse.n.01', 'name': 'blockhouse'}, {'id': 5882, 'synset': 'block_plane.n.01', 'name': 'block_plane'}, {'id': 5883, 'synset': 'bloodmobile.n.01', 'name': 'bloodmobile'}, {'id': 5884, 'synset': 'bloomers.n.01', 'name': 'bloomers'}, {'id': 5885, 'synset': 'blower.n.01', 'name': 'blower'}, {'id': 5886, 'synset': 'blowtorch.n.01', 'name': 'blowtorch'}, {'id': 5887, 'synset': 'blucher.n.02', 'name': 'blucher'}, {'id': 5888, 'synset': 'bludgeon.n.01', 'name': 'bludgeon'}, {'id': 5889, 'synset': 'blue.n.02', 'name': 'blue'}, {'id': 5890, 'synset': 'blue_chip.n.02', 'name': 'blue_chip'}, {'id': 5891, 'synset': 'blunderbuss.n.01', 'name': 'blunderbuss'}, {'id': 5892, 'synset': 'blunt_file.n.01', 'name': 'blunt_file'}, {'id': 
5893, 'synset': 'boarding.n.02', 'name': 'boarding'}, {'id': 5894, 'synset': 'boarding_house.n.01', 'name': 'boarding_house'}, {'id': 5895, 'synset': 'boardroom.n.01', 'name': 'boardroom'}, {'id': 5896, 'synset': 'boards.n.02', 'name': 'boards'}, {'id': 5897, 'synset': 'boater.n.01', 'name': 'boater'}, {'id': 5898, 'synset': 'boat_hook.n.01', 'name': 'boat_hook'}, {'id': 5899, 'synset': 'boathouse.n.01', 'name': 'boathouse'}, {'id': 5900, 'synset': "boatswain's_chair.n.01", 'name': "boatswain's_chair"}, {'id': 5901, 'synset': 'boat_train.n.01', 'name': 'boat_train'}, {'id': 5902, 'synset': 'boatyard.n.01', 'name': 'boatyard'}, {'id': 5903, 'synset': 'bobsled.n.02', 'name': 'bobsled'}, {'id': 5904, 'synset': 'bobsled.n.01', 'name': 'bobsled'}, {'id': 5905, 'synset': 'bocce_ball.n.01', 'name': 'bocce_ball'}, {'id': 5906, 'synset': 'bodega.n.01', 'name': 'bodega'}, {'id': 5907, 'synset': 'bodice.n.01', 'name': 'bodice'}, {'id': 5908, 'synset': 'bodkin.n.04', 'name': 'bodkin'}, {'id': 5909, 'synset': 'bodkin.n.03', 'name': 'bodkin'}, {'id': 5910, 'synset': 'bodkin.n.02', 'name': 'bodkin'}, {'id': 5911, 'synset': 'body.n.11', 'name': 'body'}, {'id': 5912, 'synset': 'body_armor.n.01', 'name': 'body_armor'}, {'id': 5913, 'synset': 'body_lotion.n.01', 'name': 'body_lotion'}, {'id': 5914, 'synset': 'body_stocking.n.01', 'name': 'body_stocking'}, {'id': 5915, 'synset': 'body_plethysmograph.n.01', 'name': 'body_plethysmograph'}, {'id': 5916, 'synset': 'body_pad.n.01', 'name': 'body_pad'}, {'id': 5917, 'synset': 'bodywork.n.01', 'name': 'bodywork'}, {'id': 5918, 'synset': 'bofors_gun.n.01', 'name': 'Bofors_gun'}, {'id': 5919, 'synset': 'bogy.n.01', 'name': 'bogy'}, {'id': 5920, 'synset': 'boiler.n.01', 'name': 'boiler'}, {'id': 5921, 'synset': 'boiling_water_reactor.n.01', 'name': 'boiling_water_reactor'}, {'id': 5922, 'synset': 'bolero.n.02', 'name': 'bolero'}, {'id': 5923, 'synset': 'bollard.n.01', 'name': 'bollard'}, {'id': 5924, 'synset': 'bolo.n.02', 'name': 'bolo'}, {'id': 5925, 'synset': 'bolt.n.02', 'name': 'bolt'}, {'id': 5926, 'synset': 'bolt_cutter.n.01', 'name': 'bolt_cutter'}, {'id': 5927, 'synset': 'bomb.n.01', 'name': 'bomb'}, {'id': 5928, 'synset': 'bombazine.n.01', 'name': 'bombazine'}, {'id': 5929, 'synset': 'bomb_calorimeter.n.01', 'name': 'bomb_calorimeter'}, {'id': 5930, 'synset': 'bomber.n.01', 'name': 'bomber'}, {'id': 5931, 'synset': 'bomber_jacket.n.01', 'name': 'bomber_jacket'}, {'id': 5932, 'synset': 'bomblet.n.01', 'name': 'bomblet'}, {'id': 5933, 'synset': 'bomb_rack.n.01', 'name': 'bomb_rack'}, {'id': 5934, 'synset': 'bombshell.n.03', 'name': 'bombshell'}, {'id': 5935, 'synset': 'bomb_shelter.n.01', 'name': 'bomb_shelter'}, {'id': 5936, 'synset': 'bone-ash_cup.n.01', 'name': 'bone-ash_cup'}, {'id': 5937, 'synset': 'bone_china.n.01', 'name': 'bone_china'}, {'id': 5938, 'synset': 'bones.n.01', 'name': 'bones'}, {'id': 5939, 'synset': 'boneshaker.n.01', 'name': 'boneshaker'}, {'id': 5940, 'synset': 'bongo.n.01', 'name': 'bongo'}, {'id': 5941, 'synset': 'book.n.11', 'name': 'book'}, {'id': 5942, 'synset': 'book_bag.n.01', 'name': 'book_bag'}, {'id': 5943, 'synset': 'bookbindery.n.01', 'name': 'bookbindery'}, {'id': 5944, 'synset': 'bookend.n.01', 'name': 'bookend'}, {'id': 5945, 'synset': 'bookmobile.n.01', 'name': 'bookmobile'}, {'id': 5946, 'synset': 'bookshelf.n.01', 'name': 'bookshelf'}, {'id': 5947, 'synset': 'bookshop.n.01', 'name': 'bookshop'}, {'id': 5948, 'synset': 'boom.n.05', 'name': 'boom'}, {'id': 5949, 'synset': 'boomerang.n.01', 'name': 'boomerang'}, {'id': 
5950, 'synset': 'booster.n.05', 'name': 'booster'}, {'id': 5951, 'synset': 'booster.n.04', 'name': 'booster'}, {'id': 5952, 'synset': 'boot.n.04', 'name': 'boot'}, {'id': 5953, 'synset': 'boot_camp.n.01', 'name': 'boot_camp'}, {'id': 5954, 'synset': 'bootee.n.01', 'name': 'bootee'}, {'id': 5955, 'synset': 'booth.n.02', 'name': 'booth'}, {'id': 5956, 'synset': 'booth.n.04', 'name': 'booth'}, {'id': 5957, 'synset': 'booth.n.01', 'name': 'booth'}, {'id': 5958, 'synset': 'boothose.n.01', 'name': 'boothose'}, {'id': 5959, 'synset': 'bootjack.n.01', 'name': 'bootjack'}, {'id': 5960, 'synset': 'bootlace.n.01', 'name': 'bootlace'}, {'id': 5961, 'synset': 'bootleg.n.02', 'name': 'bootleg'}, {'id': 5962, 'synset': 'bootstrap.n.01', 'name': 'bootstrap'}, {'id': 5963, 'synset': 'bore_bit.n.01', 'name': 'bore_bit'}, {'id': 5964, 'synset': 'boron_chamber.n.01', 'name': 'boron_chamber'}, {'id': 5965, 'synset': 'borstal.n.01', 'name': 'borstal'}, {'id': 5966, 'synset': 'bosom.n.03', 'name': 'bosom'}, {'id': 5967, 'synset': 'boston_rocker.n.01', 'name': 'Boston_rocker'}, {'id': 5968, 'synset': 'bota.n.01', 'name': 'bota'}, {'id': 5969, 'synset': 'bottle.n.03', 'name': 'bottle'}, {'id': 5970, 'synset': 'bottle_bank.n.01', 'name': 'bottle_bank'}, {'id': 5971, 'synset': 'bottlebrush.n.01', 'name': 'bottlebrush'}, {'id': 5972, 'synset': 'bottlecap.n.01', 'name': 'bottlecap'}, {'id': 5973, 'synset': 'bottling_plant.n.01', 'name': 'bottling_plant'}, {'id': 5974, 'synset': 'bottom.n.07', 'name': 'bottom'}, {'id': 5975, 'synset': 'boucle.n.01', 'name': 'boucle'}, {'id': 5976, 'synset': 'boudoir.n.01', 'name': 'boudoir'}, {'id': 5977, 'synset': 'boulle.n.01', 'name': 'boulle'}, {'id': 5978, 'synset': 'bouncing_betty.n.01', 'name': 'bouncing_betty'}, {'id': 5979, 'synset': 'boutique.n.01', 'name': 'boutique'}, {'id': 5980, 'synset': 'boutonniere.n.01', 'name': 'boutonniere'}, {'id': 5981, 'synset': 'bow.n.02', 'name': 'bow'}, {'id': 5982, 'synset': 'bow.n.01', 'name': 'bow'}, {'id': 5983, 'synset': 'bow_and_arrow.n.01', 'name': 'bow_and_arrow'}, {'id': 5984, 'synset': 'bowed_stringed_instrument.n.01', 'name': 'bowed_stringed_instrument'}, {'id': 5985, 'synset': 'bowie_knife.n.01', 'name': 'Bowie_knife'}, {'id': 5986, 'synset': 'bowl.n.01', 'name': 'bowl'}, {'id': 5987, 'synset': 'bowl.n.07', 'name': 'bowl'}, {'id': 5988, 'synset': 'bowline.n.01', 'name': 'bowline'}, {'id': 5989, 'synset': 'bowling_alley.n.01', 'name': 'bowling_alley'}, {'id': 5990, 'synset': 'bowling_equipment.n.01', 'name': 'bowling_equipment'}, {'id': 5991, 'synset': 'bowling_pin.n.01', 'name': 'bowling_pin'}, {'id': 5992, 'synset': 'bowling_shoe.n.01', 'name': 'bowling_shoe'}, {'id': 5993, 'synset': 'bowsprit.n.01', 'name': 'bowsprit'}, {'id': 5994, 'synset': 'bowstring.n.01', 'name': 'bowstring'}, {'id': 5995, 'synset': 'box.n.02', 'name': 'box'}, {'id': 5996, 'synset': 'box.n.08', 'name': 'box'}, {'id': 5997, 'synset': 'box_beam.n.01', 'name': 'box_beam'}, {'id': 5998, 'synset': 'box_camera.n.01', 'name': 'box_camera'}, {'id': 5999, 'synset': 'boxcar.n.01', 'name': 'boxcar'}, {'id': 6000, 'synset': 'box_coat.n.01', 'name': 'box_coat'}, {'id': 6001, 'synset': 'boxing_equipment.n.01', 'name': 'boxing_equipment'}, {'id': 6002, 'synset': 'box_office.n.02', 'name': 'box_office'}, {'id': 6003, 'synset': 'box_spring.n.01', 'name': 'box_spring'}, {'id': 6004, 'synset': 'box_wrench.n.01', 'name': 'box_wrench'}, {'id': 6005, 'synset': 'brace.n.09', 'name': 'brace'}, {'id': 6006, 'synset': 'brace.n.07', 'name': 'brace'}, {'id': 6007, 'synset': 
'brace.n.01', 'name': 'brace'}, {'id': 6008, 'synset': 'brace_and_bit.n.01', 'name': 'brace_and_bit'}, {'id': 6009, 'synset': 'bracer.n.01', 'name': 'bracer'}, {'id': 6010, 'synset': 'brace_wrench.n.01', 'name': 'brace_wrench'}, {'id': 6011, 'synset': 'bracket.n.04', 'name': 'bracket'}, {'id': 6012, 'synset': 'bradawl.n.01', 'name': 'bradawl'}, {'id': 6013, 'synset': 'brake.n.01', 'name': 'brake'}, {'id': 6014, 'synset': 'brake.n.05', 'name': 'brake'}, {'id': 6015, 'synset': 'brake_band.n.01', 'name': 'brake_band'}, {'id': 6016, 'synset': 'brake_cylinder.n.01', 'name': 'brake_cylinder'}, {'id': 6017, 'synset': 'brake_disk.n.01', 'name': 'brake_disk'}, {'id': 6018, 'synset': 'brake_drum.n.01', 'name': 'brake_drum'}, {'id': 6019, 'synset': 'brake_lining.n.01', 'name': 'brake_lining'}, {'id': 6020, 'synset': 'brake_pad.n.01', 'name': 'brake_pad'}, {'id': 6021, 'synset': 'brake_pedal.n.01', 'name': 'brake_pedal'}, {'id': 6022, 'synset': 'brake_shoe.n.01', 'name': 'brake_shoe'}, {'id': 6023, 'synset': 'brake_system.n.01', 'name': 'brake_system'}, {'id': 6024, 'synset': 'brass.n.02', 'name': 'brass'}, {'id': 6025, 'synset': 'brass.n.05', 'name': 'brass'}, {'id': 6026, 'synset': 'brassard.n.01', 'name': 'brassard'}, {'id': 6027, 'synset': 'brasserie.n.01', 'name': 'brasserie'}, {'id': 6028, 'synset': 'brassie.n.01', 'name': 'brassie'}, {'id': 6029, 'synset': 'brass_knucks.n.01', 'name': 'brass_knucks'}, {'id': 6030, 'synset': 'brattice.n.01', 'name': 'brattice'}, {'id': 6031, 'synset': 'brazier.n.01', 'name': 'brazier'}, {'id': 6032, 'synset': 'breadbasket.n.03', 'name': 'breadbasket'}, {'id': 6033, 'synset': 'bread_knife.n.01', 'name': 'bread_knife'}, {'id': 6034, 'synset': 'breakable.n.01', 'name': 'breakable'}, {'id': 6035, 'synset': 'breakfast_area.n.01', 'name': 'breakfast_area'}, {'id': 6036, 'synset': 'breakfast_table.n.01', 'name': 'breakfast_table'}, {'id': 6037, 'synset': 'breakwater.n.01', 'name': 'breakwater'}, {'id': 6038, 'synset': 'breast_drill.n.01', 'name': 'breast_drill'}, {'id': 6039, 'synset': 'breast_implant.n.01', 'name': 'breast_implant'}, {'id': 6040, 'synset': 'breastplate.n.01', 'name': 'breastplate'}, {'id': 6041, 'synset': 'breast_pocket.n.01', 'name': 'breast_pocket'}, {'id': 6042, 'synset': 'breathalyzer.n.01', 'name': 'breathalyzer'}, {'id': 6043, 'synset': 'breechblock.n.01', 'name': 'breechblock'}, {'id': 6044, 'synset': 'breeches.n.01', 'name': 'breeches'}, {'id': 6045, 'synset': 'breeches_buoy.n.01', 'name': 'breeches_buoy'}, {'id': 6046, 'synset': 'breechloader.n.01', 'name': 'breechloader'}, {'id': 6047, 'synset': 'breeder_reactor.n.01', 'name': 'breeder_reactor'}, {'id': 6048, 'synset': 'bren.n.01', 'name': 'Bren'}, {'id': 6049, 'synset': 'brewpub.n.01', 'name': 'brewpub'}, {'id': 6050, 'synset': 'brick.n.01', 'name': 'brick'}, {'id': 6051, 'synset': 'brickkiln.n.01', 'name': 'brickkiln'}, {'id': 6052, 'synset': "bricklayer's_hammer.n.01", 'name': "bricklayer's_hammer"}, {'id': 6053, 'synset': 'brick_trowel.n.01', 'name': 'brick_trowel'}, {'id': 6054, 'synset': 'brickwork.n.01', 'name': 'brickwork'}, {'id': 6055, 'synset': 'bridge.n.01', 'name': 'bridge'}, {'id': 6056, 'synset': 'bridge.n.08', 'name': 'bridge'}, {'id': 6057, 'synset': 'bridle.n.01', 'name': 'bridle'}, {'id': 6058, 'synset': 'bridle_path.n.01', 'name': 'bridle_path'}, {'id': 6059, 'synset': 'bridoon.n.01', 'name': 'bridoon'}, {'id': 6060, 'synset': 'briefcase_bomb.n.01', 'name': 'briefcase_bomb'}, {'id': 6061, 'synset': 'briefcase_computer.n.01', 'name': 'briefcase_computer'}, {'id': 6062, 
'synset': 'briefs.n.01', 'name': 'briefs'}, {'id': 6063, 'synset': 'brig.n.02', 'name': 'brig'}, {'id': 6064, 'synset': 'brig.n.01', 'name': 'brig'}, {'id': 6065, 'synset': 'brigandine.n.01', 'name': 'brigandine'}, {'id': 6066, 'synset': 'brigantine.n.01', 'name': 'brigantine'}, {'id': 6067, 'synset': 'brilliantine.n.01', 'name': 'brilliantine'}, {'id': 6068, 'synset': 'brilliant_pebble.n.01', 'name': 'brilliant_pebble'}, {'id': 6069, 'synset': 'brim.n.02', 'name': 'brim'}, {'id': 6070, 'synset': 'bristle_brush.n.01', 'name': 'bristle_brush'}, {'id': 6071, 'synset': 'britches.n.01', 'name': 'britches'}, {'id': 6072, 'synset': 'broad_arrow.n.03', 'name': 'broad_arrow'}, {'id': 6073, 'synset': 'broadax.n.01', 'name': 'broadax'}, {'id': 6074, 'synset': 'brochette.n.01', 'name': 'brochette'}, {'id': 6075, 'synset': 'broadcaster.n.02', 'name': 'broadcaster'}, {'id': 6076, 'synset': 'broadcloth.n.02', 'name': 'broadcloth'}, {'id': 6077, 'synset': 'broadcloth.n.01', 'name': 'broadcloth'}, {'id': 6078, 'synset': 'broad_hatchet.n.01', 'name': 'broad_hatchet'}, {'id': 6079, 'synset': 'broadloom.n.01', 'name': 'broadloom'}, {'id': 6080, 'synset': 'broadside.n.03', 'name': 'broadside'}, {'id': 6081, 'synset': 'broadsword.n.01', 'name': 'broadsword'}, {'id': 6082, 'synset': 'brocade.n.01', 'name': 'brocade'}, {'id': 6083, 'synset': 'brogan.n.01', 'name': 'brogan'}, {'id': 6084, 'synset': 'broiler.n.01', 'name': 'broiler'}, {'id': 6085, 'synset': 'broken_arch.n.01', 'name': 'broken_arch'}, {'id': 6086, 'synset': 'bronchoscope.n.01', 'name': 'bronchoscope'}, {'id': 6087, 'synset': 'broom_closet.n.01', 'name': 'broom_closet'}, {'id': 6088, 'synset': 'broomstick.n.01', 'name': 'broomstick'}, {'id': 6089, 'synset': 'brougham.n.01', 'name': 'brougham'}, {'id': 6090, 'synset': 'browning_automatic_rifle.n.01', 'name': 'Browning_automatic_rifle'}, {'id': 6091, 'synset': 'browning_machine_gun.n.01', 'name': 'Browning_machine_gun'}, {'id': 6092, 'synset': 'brownstone.n.02', 'name': 'brownstone'}, {'id': 6093, 'synset': 'brunch_coat.n.01', 'name': 'brunch_coat'}, {'id': 6094, 'synset': 'brush.n.02', 'name': 'brush'}, {'id': 6095, 'synset': 'brussels_carpet.n.01', 'name': 'Brussels_carpet'}, {'id': 6096, 'synset': 'brussels_lace.n.01', 'name': 'Brussels_lace'}, {'id': 6097, 'synset': 'bubble.n.04', 'name': 'bubble'}, {'id': 6098, 'synset': 'bubble_chamber.n.01', 'name': 'bubble_chamber'}, {'id': 6099, 'synset': 'bubble_jet_printer.n.01', 'name': 'bubble_jet_printer'}, {'id': 6100, 'synset': 'buckboard.n.01', 'name': 'buckboard'}, {'id': 6101, 'synset': 'bucket_seat.n.01', 'name': 'bucket_seat'}, {'id': 6102, 'synset': 'bucket_shop.n.02', 'name': 'bucket_shop'}, {'id': 6103, 'synset': 'buckle.n.01', 'name': 'buckle'}, {'id': 6104, 'synset': 'buckram.n.01', 'name': 'buckram'}, {'id': 6105, 'synset': 'bucksaw.n.01', 'name': 'bucksaw'}, {'id': 6106, 'synset': 'buckskins.n.01', 'name': 'buckskins'}, {'id': 6107, 'synset': 'buff.n.05', 'name': 'buff'}, {'id': 6108, 'synset': 'buffer.n.05', 'name': 'buffer'}, {'id': 6109, 'synset': 'buffer.n.04', 'name': 'buffer'}, {'id': 6110, 'synset': 'buffet.n.01', 'name': 'buffet'}, {'id': 6111, 'synset': 'buffing_wheel.n.01', 'name': 'buffing_wheel'}, {'id': 6112, 'synset': 'bugle.n.01', 'name': 'bugle'}, {'id': 6113, 'synset': 'building.n.01', 'name': 'building'}, {'id': 6114, 'synset': 'building_complex.n.01', 'name': 'building_complex'}, {'id': 6115, 'synset': 'bulldog_clip.n.01', 'name': 'bulldog_clip'}, {'id': 6116, 'synset': 'bulldog_wrench.n.01', 'name': 'bulldog_wrench'}, 
{'id': 6117, 'synset': 'bullet.n.01', 'name': 'bullet'}, {'id': 6118, 'synset': 'bullion.n.02', 'name': 'bullion'}, {'id': 6119, 'synset': 'bullnose.n.01', 'name': 'bullnose'}, {'id': 6120, 'synset': 'bullpen.n.02', 'name': 'bullpen'}, {'id': 6121, 'synset': 'bullpen.n.01', 'name': 'bullpen'}, {'id': 6122, 'synset': 'bullring.n.01', 'name': 'bullring'}, {'id': 6123, 'synset': 'bulwark.n.02', 'name': 'bulwark'}, {'id': 6124, 'synset': 'bumboat.n.01', 'name': 'bumboat'}, {'id': 6125, 'synset': 'bumper.n.02', 'name': 'bumper'}, {'id': 6126, 'synset': 'bumper.n.01', 'name': 'bumper'}, {'id': 6127, 'synset': 'bumper_car.n.01', 'name': 'bumper_car'}, {'id': 6128, 'synset': 'bumper_guard.n.01', 'name': 'bumper_guard'}, {'id': 6129, 'synset': 'bumper_jack.n.01', 'name': 'bumper_jack'}, {'id': 6130, 'synset': 'bundle.n.02', 'name': 'bundle'}, {'id': 6131, 'synset': 'bung.n.01', 'name': 'bung'}, {'id': 6132, 'synset': 'bungalow.n.01', 'name': 'bungalow'}, {'id': 6133, 'synset': 'bungee.n.01', 'name': 'bungee'}, {'id': 6134, 'synset': 'bunghole.n.02', 'name': 'bunghole'}, {'id': 6135, 'synset': 'bunk.n.03', 'name': 'bunk'}, {'id': 6136, 'synset': 'bunk.n.01', 'name': 'bunk'}, {'id': 6137, 'synset': 'bunker.n.01', 'name': 'bunker'}, {'id': 6138, 'synset': 'bunker.n.03', 'name': 'bunker'}, {'id': 6139, 'synset': 'bunker.n.02', 'name': 'bunker'}, {'id': 6140, 'synset': 'bunsen_burner.n.01', 'name': 'bunsen_burner'}, {'id': 6141, 'synset': 'bunting.n.01', 'name': 'bunting'}, {'id': 6142, 'synset': 'bur.n.02', 'name': 'bur'}, {'id': 6143, 'synset': 'burberry.n.01', 'name': 'Burberry'}, {'id': 6144, 'synset': 'burette.n.01', 'name': 'burette'}, {'id': 6145, 'synset': 'burglar_alarm.n.02', 'name': 'burglar_alarm'}, {'id': 6146, 'synset': 'burial_chamber.n.01', 'name': 'burial_chamber'}, {'id': 6147, 'synset': 'burial_garment.n.01', 'name': 'burial_garment'}, {'id': 6148, 'synset': 'burial_mound.n.01', 'name': 'burial_mound'}, {'id': 6149, 'synset': 'burin.n.01', 'name': 'burin'}, {'id': 6150, 'synset': 'burqa.n.01', 'name': 'burqa'}, {'id': 6151, 'synset': 'burlap.n.01', 'name': 'burlap'}, {'id': 6152, 'synset': 'burn_bag.n.01', 'name': 'burn_bag'}, {'id': 6153, 'synset': 'burner.n.01', 'name': 'burner'}, {'id': 6154, 'synset': 'burnous.n.01', 'name': 'burnous'}, {'id': 6155, 'synset': 'burp_gun.n.01', 'name': 'burp_gun'}, {'id': 6156, 'synset': 'burr.n.04', 'name': 'burr'}, {'id': 6157, 'synset': 'bushel_basket.n.01', 'name': 'bushel_basket'}, {'id': 6158, 'synset': 'bushing.n.02', 'name': 'bushing'}, {'id': 6159, 'synset': 'bush_jacket.n.01', 'name': 'bush_jacket'}, {'id': 6160, 'synset': 'business_suit.n.01', 'name': 'business_suit'}, {'id': 6161, 'synset': 'buskin.n.01', 'name': 'buskin'}, {'id': 6162, 'synset': 'bustier.n.01', 'name': 'bustier'}, {'id': 6163, 'synset': 'bustle.n.02', 'name': 'bustle'}, {'id': 6164, 'synset': 'butcher_knife.n.01', 'name': 'butcher_knife'}, {'id': 6165, 'synset': 'butcher_shop.n.01', 'name': 'butcher_shop'}, {'id': 6166, 'synset': 'butter_dish.n.01', 'name': 'butter_dish'}, {'id': 6167, 'synset': 'butterfly_valve.n.01', 'name': 'butterfly_valve'}, {'id': 6168, 'synset': 'butter_knife.n.01', 'name': 'butter_knife'}, {'id': 6169, 'synset': 'butt_hinge.n.01', 'name': 'butt_hinge'}, {'id': 6170, 'synset': 'butt_joint.n.01', 'name': 'butt_joint'}, {'id': 6171, 'synset': 'buttonhook.n.01', 'name': 'buttonhook'}, {'id': 6172, 'synset': 'buttress.n.01', 'name': 'buttress'}, {'id': 6173, 'synset': 'butt_shaft.n.01', 'name': 'butt_shaft'}, {'id': 6174, 'synset': 'butt_weld.n.01', 
'name': 'butt_weld'}, {'id': 6175, 'synset': 'buzz_bomb.n.01', 'name': 'buzz_bomb'}, {'id': 6176, 'synset': 'buzzer.n.02', 'name': 'buzzer'}, {'id': 6177, 'synset': 'bvd.n.01', 'name': 'BVD'}, {'id': 6178, 'synset': 'bypass_condenser.n.01', 'name': 'bypass_condenser'}, {'id': 6179, 'synset': 'byway.n.01', 'name': 'byway'}, {'id': 6180, 'synset': 'cab.n.02', 'name': 'cab'}, {'id': 6181, 'synset': 'cab.n.01', 'name': 'cab'}, {'id': 6182, 'synset': 'cabaret.n.01', 'name': 'cabaret'}, {'id': 6183, 'synset': 'caber.n.01', 'name': 'caber'}, {'id': 6184, 'synset': 'cabin.n.03', 'name': 'cabin'}, {'id': 6185, 'synset': 'cabin.n.02', 'name': 'cabin'}, {'id': 6186, 'synset': 'cabin_class.n.01', 'name': 'cabin_class'}, {'id': 6187, 'synset': 'cabin_cruiser.n.01', 'name': 'cabin_cruiser'}, {'id': 6188, 'synset': 'cabinet.n.04', 'name': 'cabinet'}, {'id': 6189, 'synset': 'cabinetwork.n.01', 'name': 'cabinetwork'}, {'id': 6190, 'synset': 'cabin_liner.n.01', 'name': 'cabin_liner'}, {'id': 6191, 'synset': 'cable.n.06', 'name': 'cable'}, {'id': 6192, 'synset': 'cable.n.02', 'name': 'cable'}, {'id': 6193, 'synset': 'cable_car.n.01', 'name': 'cable_car'}, {'id': 6194, 'synset': 'cache.n.03', 'name': 'cache'}, {'id': 6195, 'synset': 'caddy.n.01', 'name': 'caddy'}, {'id': 6196, 'synset': 'caesium_clock.n.01', 'name': 'caesium_clock'}, {'id': 6197, 'synset': 'cafe.n.01', 'name': 'cafe'}, {'id': 6198, 'synset': 'cafeteria.n.01', 'name': 'cafeteria'}, {'id': 6199, 'synset': 'cafeteria_tray.n.01', 'name': 'cafeteria_tray'}, {'id': 6200, 'synset': 'caff.n.01', 'name': 'caff'}, {'id': 6201, 'synset': 'caftan.n.02', 'name': 'caftan'}, {'id': 6202, 'synset': 'caftan.n.01', 'name': 'caftan'}, {'id': 6203, 'synset': 'cage.n.01', 'name': 'cage'}, {'id': 6204, 'synset': 'cage.n.04', 'name': 'cage'}, {'id': 6205, 'synset': 'cagoule.n.01', 'name': 'cagoule'}, {'id': 6206, 'synset': 'caisson.n.02', 'name': 'caisson'}, {'id': 6207, 'synset': 'calash.n.02', 'name': 'calash'}, {'id': 6208, 'synset': 'calceus.n.01', 'name': 'calceus'}, {'id': 6209, 'synset': 'calcimine.n.01', 'name': 'calcimine'}, {'id': 6210, 'synset': 'caldron.n.01', 'name': 'caldron'}, {'id': 6211, 'synset': 'calico.n.01', 'name': 'calico'}, {'id': 6212, 'synset': 'caliper.n.01', 'name': 'caliper'}, {'id': 6213, 'synset': 'call-board.n.01', 'name': 'call-board'}, {'id': 6214, 'synset': 'call_center.n.01', 'name': 'call_center'}, {'id': 6215, 'synset': 'caller_id.n.01', 'name': 'caller_ID'}, {'id': 6216, 'synset': 'calliope.n.02', 'name': 'calliope'}, {'id': 6217, 'synset': 'calorimeter.n.01', 'name': 'calorimeter'}, {'id': 6218, 'synset': 'calpac.n.01', 'name': 'calpac'}, {'id': 6219, 'synset': 'camail.n.01', 'name': 'camail'}, {'id': 6220, 'synset': 'camber_arch.n.01', 'name': 'camber_arch'}, {'id': 6221, 'synset': 'cambric.n.01', 'name': 'cambric'}, {'id': 6222, 'synset': "camel's_hair.n.01", 'name': "camel's_hair"}, {'id': 6223, 'synset': 'camera_lucida.n.01', 'name': 'camera_lucida'}, {'id': 6224, 'synset': 'camera_obscura.n.01', 'name': 'camera_obscura'}, {'id': 6225, 'synset': 'camera_tripod.n.01', 'name': 'camera_tripod'}, {'id': 6226, 'synset': 'camise.n.01', 'name': 'camise'}, {'id': 6227, 'synset': 'camisole.n.02', 'name': 'camisole'}, {'id': 6228, 'synset': 'camisole.n.01', 'name': 'camisole'}, {'id': 6229, 'synset': 'camlet.n.02', 'name': 'camlet'}, {'id': 6230, 'synset': 'camouflage.n.03', 'name': 'camouflage'}, {'id': 6231, 'synset': 'camouflage.n.02', 'name': 'camouflage'}, {'id': 6232, 'synset': 'camp.n.01', 'name': 'camp'}, {'id': 6233, 
'synset': 'camp.n.03', 'name': 'camp'}, {'id': 6234, 'synset': 'camp.n.07', 'name': 'camp'}, {'id': 6235, 'synset': 'campaign_hat.n.01', 'name': 'campaign_hat'}, {'id': 6236, 'synset': 'campanile.n.01', 'name': 'campanile'}, {'id': 6237, 'synset': 'camp_chair.n.01', 'name': 'camp_chair'}, {'id': 6238, 'synset': 'camper_trailer.n.01', 'name': 'camper_trailer'}, {'id': 6239, 'synset': 'campstool.n.01', 'name': 'campstool'}, {'id': 6240, 'synset': 'camshaft.n.01', 'name': 'camshaft'}, {'id': 6241, 'synset': 'canal.n.03', 'name': 'canal'}, {'id': 6242, 'synset': 'canal_boat.n.01', 'name': 'canal_boat'}, {'id': 6243, 'synset': 'candelabrum.n.01', 'name': 'candelabrum'}, {'id': 6244, 'synset': 'candid_camera.n.01', 'name': 'candid_camera'}, {'id': 6245, 'synset': 'candlepin.n.01', 'name': 'candlepin'}, {'id': 6246, 'synset': 'candlesnuffer.n.01', 'name': 'candlesnuffer'}, {'id': 6247, 'synset': 'candlewick.n.02', 'name': 'candlewick'}, {'id': 6248, 'synset': 'candy_thermometer.n.01', 'name': 'candy_thermometer'}, {'id': 6249, 'synset': 'cane.n.03', 'name': 'cane'}, {'id': 6250, 'synset': 'cangue.n.01', 'name': 'cangue'}, {'id': 6251, 'synset': 'cannery.n.01', 'name': 'cannery'}, {'id': 6252, 'synset': 'cannikin.n.02', 'name': 'cannikin'}, {'id': 6253, 'synset': 'cannikin.n.01', 'name': 'cannikin'}, {'id': 6254, 'synset': 'cannon.n.01', 'name': 'cannon'}, {'id': 6255, 'synset': 'cannon.n.04', 'name': 'cannon'}, {'id': 6256, 'synset': 'cannon.n.03', 'name': 'cannon'}, {'id': 6257, 'synset': 'cannon.n.02', 'name': 'cannon'}, {'id': 6258, 'synset': 'cannonball.n.01', 'name': 'cannonball'}, {'id': 6259, 'synset': 'canopic_jar.n.01', 'name': 'canopic_jar'}, {'id': 6260, 'synset': 'canopy.n.03', 'name': 'canopy'}, {'id': 6261, 'synset': 'canopy.n.02', 'name': 'canopy'}, {'id': 6262, 'synset': 'canopy.n.01', 'name': 'canopy'}, {'id': 6263, 'synset': 'canteen.n.05', 'name': 'canteen'}, {'id': 6264, 'synset': 'canteen.n.04', 'name': 'canteen'}, {'id': 6265, 'synset': 'canteen.n.03', 'name': 'canteen'}, {'id': 6266, 'synset': 'canteen.n.02', 'name': 'canteen'}, {'id': 6267, 'synset': 'cant_hook.n.01', 'name': 'cant_hook'}, {'id': 6268, 'synset': 'cantilever.n.01', 'name': 'cantilever'}, {'id': 6269, 'synset': 'cantilever_bridge.n.01', 'name': 'cantilever_bridge'}, {'id': 6270, 'synset': 'cantle.n.01', 'name': 'cantle'}, {'id': 6271, 'synset': 'canton_crepe.n.01', 'name': 'Canton_crepe'}, {'id': 6272, 'synset': 'canvas.n.01', 'name': 'canvas'}, {'id': 6273, 'synset': 'canvas.n.06', 'name': 'canvas'}, {'id': 6274, 'synset': 'canvas_tent.n.01', 'name': 'canvas_tent'}, {'id': 6275, 'synset': 'cap.n.04', 'name': 'cap'}, {'id': 6276, 'synset': 'capacitor.n.01', 'name': 'capacitor'}, {'id': 6277, 'synset': 'caparison.n.01', 'name': 'caparison'}, {'id': 6278, 'synset': 'capital_ship.n.01', 'name': 'capital_ship'}, {'id': 6279, 'synset': 'capitol.n.01', 'name': 'capitol'}, {'id': 6280, 'synset': 'cap_opener.n.01', 'name': 'cap_opener'}, {'id': 6281, 'synset': 'capote.n.02', 'name': 'capote'}, {'id': 6282, 'synset': 'capote.n.01', 'name': 'capote'}, {'id': 6283, 'synset': 'cap_screw.n.01', 'name': 'cap_screw'}, {'id': 6284, 'synset': 'capstan.n.01', 'name': 'capstan'}, {'id': 6285, 'synset': 'capstone.n.02', 'name': 'capstone'}, {'id': 6286, 'synset': 'capsule.n.01', 'name': 'capsule'}, {'id': 6287, 'synset': "captain's_chair.n.01", 'name': "captain's_chair"}, {'id': 6288, 'synset': 'carabiner.n.01', 'name': 'carabiner'}, {'id': 6289, 'synset': 'carafe.n.01', 'name': 'carafe'}, {'id': 6290, 'synset': 
'caravansary.n.01', 'name': 'caravansary'}, {'id': 6291, 'synset': 'carbine.n.01', 'name': 'carbine'}, {'id': 6292, 'synset': 'car_bomb.n.01', 'name': 'car_bomb'}, {'id': 6293, 'synset': 'carbon_arc_lamp.n.01', 'name': 'carbon_arc_lamp'}, {'id': 6294, 'synset': 'carboy.n.01', 'name': 'carboy'}, {'id': 6295, 'synset': 'carburetor.n.01', 'name': 'carburetor'}, {'id': 6296, 'synset': 'car_carrier.n.01', 'name': 'car_carrier'}, {'id': 6297, 'synset': 'cardcase.n.01', 'name': 'cardcase'}, {'id': 6298, 'synset': 'cardiac_monitor.n.01', 'name': 'cardiac_monitor'}, {'id': 6299, 'synset': 'card_index.n.01', 'name': 'card_index'}, {'id': 6300, 'synset': 'cardiograph.n.01', 'name': 'cardiograph'}, {'id': 6301, 'synset': 'cardioid_microphone.n.01', 'name': 'cardioid_microphone'}, {'id': 6302, 'synset': 'car_door.n.01', 'name': 'car_door'}, {'id': 6303, 'synset': 'cardroom.n.01', 'name': 'cardroom'}, {'id': 6304, 'synset': 'card_table.n.02', 'name': 'card_table'}, {'id': 6305, 'synset': 'card_table.n.01', 'name': 'card_table'}, {'id': 6306, 'synset': 'car-ferry.n.01', 'name': 'car-ferry'}, {'id': 6307, 'synset': 'cargo_area.n.01', 'name': 'cargo_area'}, {'id': 6308, 'synset': 'cargo_container.n.01', 'name': 'cargo_container'}, {'id': 6309, 'synset': 'cargo_door.n.01', 'name': 'cargo_door'}, {'id': 6310, 'synset': 'cargo_hatch.n.01', 'name': 'cargo_hatch'}, {'id': 6311, 'synset': 'cargo_helicopter.n.01', 'name': 'cargo_helicopter'}, {'id': 6312, 'synset': 'cargo_liner.n.01', 'name': 'cargo_liner'}, {'id': 6313, 'synset': 'carillon.n.01', 'name': 'carillon'}, {'id': 6314, 'synset': 'car_mirror.n.01', 'name': 'car_mirror'}, {'id': 6315, 'synset': 'caroche.n.01', 'name': 'caroche'}, {'id': 6316, 'synset': 'carousel.n.02', 'name': 'carousel'}, {'id': 6317, 'synset': "carpenter's_hammer.n.01", 'name': "carpenter's_hammer"}, {'id': 6318, 'synset': "carpenter's_kit.n.01", 'name': "carpenter's_kit"}, {'id': 6319, 'synset': "carpenter's_level.n.01", 'name': "carpenter's_level"}, {'id': 6320, 'synset': "carpenter's_mallet.n.01", 'name': "carpenter's_mallet"}, {'id': 6321, 'synset': "carpenter's_rule.n.01", 'name': "carpenter's_rule"}, {'id': 6322, 'synset': "carpenter's_square.n.01", 'name': "carpenter's_square"}, {'id': 6323, 'synset': 'carpetbag.n.01', 'name': 'carpetbag'}, {'id': 6324, 'synset': 'carpet_beater.n.01', 'name': 'carpet_beater'}, {'id': 6325, 'synset': 'carpet_loom.n.01', 'name': 'carpet_loom'}, {'id': 6326, 'synset': 'carpet_pad.n.01', 'name': 'carpet_pad'}, {'id': 6327, 'synset': 'carpet_sweeper.n.01', 'name': 'carpet_sweeper'}, {'id': 6328, 'synset': 'carpet_tack.n.01', 'name': 'carpet_tack'}, {'id': 6329, 'synset': 'carport.n.01', 'name': 'carport'}, {'id': 6330, 'synset': 'carrack.n.01', 'name': 'carrack'}, {'id': 6331, 'synset': 'carrel.n.02', 'name': 'carrel'}, {'id': 6332, 'synset': 'carriage.n.04', 'name': 'carriage'}, {'id': 6333, 'synset': 'carriage_bolt.n.01', 'name': 'carriage_bolt'}, {'id': 6334, 'synset': 'carriageway.n.01', 'name': 'carriageway'}, {'id': 6335, 'synset': 'carriage_wrench.n.01', 'name': 'carriage_wrench'}, {'id': 6336, 'synset': 'carrick_bend.n.01', 'name': 'carrick_bend'}, {'id': 6337, 'synset': 'carrier.n.10', 'name': 'carrier'}, {'id': 6338, 'synset': 'carrycot.n.01', 'name': 'carrycot'}, {'id': 6339, 'synset': 'car_seat.n.01', 'name': 'car_seat'}, {'id': 6340, 'synset': 'car_tire.n.01', 'name': 'car_tire'}, {'id': 6341, 'synset': 'cartouche.n.01', 'name': 'cartouche'}, {'id': 6342, 'synset': 'car_train.n.01', 'name': 'car_train'}, {'id': 6343, 'synset': 
'cartridge.n.01', 'name': 'cartridge'}, {'id': 6344, 'synset': 'cartridge.n.04', 'name': 'cartridge'}, {'id': 6345, 'synset': 'cartridge_belt.n.01', 'name': 'cartridge_belt'}, {'id': 6346, 'synset': 'cartridge_extractor.n.01', 'name': 'cartridge_extractor'}, {'id': 6347, 'synset': 'cartridge_fuse.n.01', 'name': 'cartridge_fuse'}, {'id': 6348, 'synset': 'cartridge_holder.n.01', 'name': 'cartridge_holder'}, {'id': 6349, 'synset': 'cartwheel.n.01', 'name': 'cartwheel'}, {'id': 6350, 'synset': 'carving_fork.n.01', 'name': 'carving_fork'}, {'id': 6351, 'synset': 'carving_knife.n.01', 'name': 'carving_knife'}, {'id': 6352, 'synset': 'car_wheel.n.01', 'name': 'car_wheel'}, {'id': 6353, 'synset': 'caryatid.n.01', 'name': 'caryatid'}, {'id': 6354, 'synset': 'cascade_liquefier.n.01', 'name': 'cascade_liquefier'}, {'id': 6355, 'synset': 'cascade_transformer.n.01', 'name': 'cascade_transformer'}, {'id': 6356, 'synset': 'case.n.05', 'name': 'case'}, {'id': 6357, 'synset': 'case.n.20', 'name': 'case'}, {'id': 6358, 'synset': 'case.n.18', 'name': 'case'}, {'id': 6359, 'synset': 'casein_paint.n.01', 'name': 'casein_paint'}, {'id': 6360, 'synset': 'case_knife.n.02', 'name': 'case_knife'}, {'id': 6361, 'synset': 'case_knife.n.01', 'name': 'case_knife'}, {'id': 6362, 'synset': 'casement.n.01', 'name': 'casement'}, {'id': 6363, 'synset': 'casement_window.n.01', 'name': 'casement_window'}, {'id': 6364, 'synset': 'casern.n.01', 'name': 'casern'}, {'id': 6365, 'synset': 'case_shot.n.01', 'name': 'case_shot'}, {'id': 6366, 'synset': 'cash_bar.n.01', 'name': 'cash_bar'}, {'id': 6367, 'synset': 'cashbox.n.01', 'name': 'cashbox'}, {'id': 6368, 'synset': 'cash_machine.n.01', 'name': 'cash_machine'}, {'id': 6369, 'synset': 'cashmere.n.01', 'name': 'cashmere'}, {'id': 6370, 'synset': 'casing.n.03', 'name': 'casing'}, {'id': 6371, 'synset': 'casino.n.01', 'name': 'casino'}, {'id': 6372, 'synset': 'casket.n.02', 'name': 'casket'}, {'id': 6373, 'synset': 'casque.n.01', 'name': 'casque'}, {'id': 6374, 'synset': 'casquet.n.01', 'name': 'casquet'}, {'id': 6375, 'synset': 'cassegrainian_telescope.n.01', 'name': 'Cassegrainian_telescope'}, {'id': 6376, 'synset': 'casserole.n.02', 'name': 'casserole'}, {'id': 6377, 'synset': 'cassette_deck.n.01', 'name': 'cassette_deck'}, {'id': 6378, 'synset': 'cassette_player.n.01', 'name': 'cassette_player'}, {'id': 6379, 'synset': 'cassette_recorder.n.01', 'name': 'cassette_recorder'}, {'id': 6380, 'synset': 'cassette_tape.n.01', 'name': 'cassette_tape'}, {'id': 6381, 'synset': 'cassock.n.01', 'name': 'cassock'}, {'id': 6382, 'synset': 'caster.n.03', 'name': 'caster'}, {'id': 6383, 'synset': 'caster.n.02', 'name': 'caster'}, {'id': 6384, 'synset': 'castle.n.02', 'name': 'castle'}, {'id': 6385, 'synset': 'castle.n.03', 'name': 'castle'}, {'id': 6386, 'synset': 'catacomb.n.01', 'name': 'catacomb'}, {'id': 6387, 'synset': 'catafalque.n.01', 'name': 'catafalque'}, {'id': 6388, 'synset': 'catalytic_converter.n.01', 'name': 'catalytic_converter'}, {'id': 6389, 'synset': 'catalytic_cracker.n.01', 'name': 'catalytic_cracker'}, {'id': 6390, 'synset': 'catamaran.n.01', 'name': 'catamaran'}, {'id': 6391, 'synset': 'catapult.n.03', 'name': 'catapult'}, {'id': 6392, 'synset': 'catapult.n.02', 'name': 'catapult'}, {'id': 6393, 'synset': 'catboat.n.01', 'name': 'catboat'}, {'id': 6394, 'synset': 'cat_box.n.01', 'name': 'cat_box'}, {'id': 6395, 'synset': 'catch.n.07', 'name': 'catch'}, {'id': 6396, 'synset': 'catchall.n.01', 'name': 'catchall'}, {'id': 6397, 'synset': "catcher's_mask.n.01", 'name': 
"catcher's_mask"}, {'id': 6398, 'synset': 'catchment.n.01', 'name': 'catchment'}, {'id': 6399, 'synset': 'caterpillar.n.02', 'name': 'Caterpillar'}, {'id': 6400, 'synset': 'cathedra.n.01', 'name': 'cathedra'}, {'id': 6401, 'synset': 'cathedral.n.01', 'name': 'cathedral'}, {'id': 6402, 'synset': 'cathedral.n.02', 'name': 'cathedral'}, {'id': 6403, 'synset': 'catheter.n.01', 'name': 'catheter'}, {'id': 6404, 'synset': 'cathode.n.01', 'name': 'cathode'}, {'id': 6405, 'synset': 'cathode-ray_tube.n.01', 'name': 'cathode-ray_tube'}, {'id': 6406, 'synset': "cat-o'-nine-tails.n.01", 'name': "cat-o'-nine-tails"}, {'id': 6407, 'synset': "cat's-paw.n.02", 'name': "cat's-paw"}, {'id': 6408, 'synset': 'catsup_bottle.n.01', 'name': 'catsup_bottle'}, {'id': 6409, 'synset': 'cattle_car.n.01', 'name': 'cattle_car'}, {'id': 6410, 'synset': 'cattle_guard.n.01', 'name': 'cattle_guard'}, {'id': 6411, 'synset': 'cattleship.n.01', 'name': 'cattleship'}, {'id': 6412, 'synset': 'cautery.n.01', 'name': 'cautery'}, {'id': 6413, 'synset': 'cavalier_hat.n.01', 'name': 'cavalier_hat'}, {'id': 6414, 'synset': 'cavalry_sword.n.01', 'name': 'cavalry_sword'}, {'id': 6415, 'synset': 'cavetto.n.01', 'name': 'cavetto'}, {'id': 6416, 'synset': 'cavity_wall.n.01', 'name': 'cavity_wall'}, {'id': 6417, 'synset': 'c_battery.n.01', 'name': 'C_battery'}, {'id': 6418, 'synset': 'c-clamp.n.01', 'name': 'C-clamp'}, {'id': 6419, 'synset': 'cd_drive.n.01', 'name': 'CD_drive'}, {'id': 6420, 'synset': 'cd-r.n.01', 'name': 'CD-R'}, {'id': 6421, 'synset': 'cd-rom.n.01', 'name': 'CD-ROM'}, {'id': 6422, 'synset': 'cd-rom_drive.n.01', 'name': 'CD-ROM_drive'}, {'id': 6423, 'synset': 'cedar_chest.n.01', 'name': 'cedar_chest'}, {'id': 6424, 'synset': 'ceiling.n.01', 'name': 'ceiling'}, {'id': 6425, 'synset': 'celesta.n.01', 'name': 'celesta'}, {'id': 6426, 'synset': 'cell.n.03', 'name': 'cell'}, {'id': 6427, 'synset': 'cell.n.07', 'name': 'cell'}, {'id': 6428, 'synset': 'cellar.n.03', 'name': 'cellar'}, {'id': 6429, 'synset': 'cellblock.n.01', 'name': 'cellblock'}, {'id': 6430, 'synset': 'cello.n.01', 'name': 'cello'}, {'id': 6431, 'synset': 'cellophane.n.01', 'name': 'cellophane'}, {'id': 6432, 'synset': 'cellulose_tape.n.01', 'name': 'cellulose_tape'}, {'id': 6433, 'synset': 'cenotaph.n.01', 'name': 'cenotaph'}, {'id': 6434, 'synset': 'censer.n.01', 'name': 'censer'}, {'id': 6435, 'synset': 'center.n.03', 'name': 'center'}, {'id': 6436, 'synset': 'center_punch.n.01', 'name': 'center_punch'}, {'id': 6437, 'synset': 'centigrade_thermometer.n.01', 'name': 'Centigrade_thermometer'}, {'id': 6438, 'synset': 'central_processing_unit.n.01', 'name': 'central_processing_unit'}, {'id': 6439, 'synset': 'centrifugal_pump.n.01', 'name': 'centrifugal_pump'}, {'id': 6440, 'synset': 'centrifuge.n.01', 'name': 'centrifuge'}, {'id': 6441, 'synset': 'ceramic.n.01', 'name': 'ceramic'}, {'id': 6442, 'synset': 'ceramic_ware.n.01', 'name': 'ceramic_ware'}, {'id': 6443, 'synset': 'cereal_bowl.n.01', 'name': 'cereal_bowl'}, {'id': 6444, 'synset': 'cereal_box.n.01', 'name': 'cereal_box'}, {'id': 6445, 'synset': 'cerecloth.n.01', 'name': 'cerecloth'}, {'id': 6446, 'synset': 'cesspool.n.01', 'name': 'cesspool'}, {'id': 6447, 'synset': 'chachka.n.02', 'name': 'chachka'}, {'id': 6448, 'synset': 'chador.n.01', 'name': 'chador'}, {'id': 6449, 'synset': 'chafing_dish.n.01', 'name': 'chafing_dish'}, {'id': 6450, 'synset': 'chain.n.03', 'name': 'chain'}, {'id': 6451, 'synset': 'chain.n.05', 'name': 'chain'}, {'id': 6452, 'synset': 'chainlink_fence.n.01', 'name': 
'chainlink_fence'}, {'id': 6453, 'synset': 'chain_printer.n.01', 'name': 'chain_printer'}, {'id': 6454, 'synset': 'chain_saw.n.01', 'name': 'chain_saw'}, {'id': 6455, 'synset': 'chain_store.n.01', 'name': 'chain_store'}, {'id': 6456, 'synset': 'chain_tongs.n.01', 'name': 'chain_tongs'}, {'id': 6457, 'synset': 'chain_wrench.n.01', 'name': 'chain_wrench'}, {'id': 6458, 'synset': 'chair.n.05', 'name': 'chair'}, {'id': 6459, 'synset': 'chair_of_state.n.01', 'name': 'chair_of_state'}, {'id': 6460, 'synset': 'chairlift.n.01', 'name': 'chairlift'}, {'id': 6461, 'synset': 'chaise.n.02', 'name': 'chaise'}, {'id': 6462, 'synset': 'chalet.n.01', 'name': 'chalet'}, {'id': 6463, 'synset': 'chalk.n.04', 'name': 'chalk'}, {'id': 6464, 'synset': 'challis.n.01', 'name': 'challis'}, {'id': 6465, 'synset': 'chamberpot.n.01', 'name': 'chamberpot'}, {'id': 6466, 'synset': 'chambray.n.01', 'name': 'chambray'}, {'id': 6467, 'synset': 'chamfer_bit.n.01', 'name': 'chamfer_bit'}, {'id': 6468, 'synset': 'chamfer_plane.n.01', 'name': 'chamfer_plane'}, {'id': 6469, 'synset': 'chamois_cloth.n.01', 'name': 'chamois_cloth'}, {'id': 6470, 'synset': 'chancel.n.01', 'name': 'chancel'}, {'id': 6471, 'synset': 'chancellery.n.01', 'name': 'chancellery'}, {'id': 6472, 'synset': 'chancery.n.02', 'name': 'chancery'}, {'id': 6473, 'synset': 'chandlery.n.01', 'name': 'chandlery'}, {'id': 6474, 'synset': 'chanfron.n.01', 'name': 'chanfron'}, {'id': 6475, 'synset': 'chanter.n.01', 'name': 'chanter'}, {'id': 6476, 'synset': 'chantry.n.02', 'name': 'chantry'}, {'id': 6477, 'synset': 'chapel.n.01', 'name': 'chapel'}, {'id': 6478, 'synset': 'chapterhouse.n.02', 'name': 'chapterhouse'}, {'id': 6479, 'synset': 'chapterhouse.n.01', 'name': 'chapterhouse'}, {'id': 6480, 'synset': 'character_printer.n.01', 'name': 'character_printer'}, {'id': 6481, 'synset': 'charcuterie.n.01', 'name': 'charcuterie'}, {'id': 6482, 'synset': 'charge-exchange_accelerator.n.01', 'name': 'charge-exchange_accelerator'}, {'id': 6483, 'synset': 'charger.n.02', 'name': 'charger'}, {'id': 6484, 'synset': 'chariot.n.01', 'name': 'chariot'}, {'id': 6485, 'synset': 'chariot.n.02', 'name': 'chariot'}, {'id': 6486, 'synset': 'charnel_house.n.01', 'name': 'charnel_house'}, {'id': 6487, 'synset': 'chassis.n.03', 'name': 'chassis'}, {'id': 6488, 'synset': 'chassis.n.02', 'name': 'chassis'}, {'id': 6489, 'synset': 'chasuble.n.01', 'name': 'chasuble'}, {'id': 6490, 'synset': 'chateau.n.01', 'name': 'chateau'}, {'id': 6491, 'synset': 'chatelaine.n.02', 'name': 'chatelaine'}, {'id': 6492, 'synset': 'checker.n.03', 'name': 'checker'}, {'id': 6493, 'synset': 'checkout.n.03', 'name': 'checkout'}, {'id': 6494, 'synset': 'cheekpiece.n.01', 'name': 'cheekpiece'}, {'id': 6495, 'synset': 'cheeseboard.n.01', 'name': 'cheeseboard'}, {'id': 6496, 'synset': 'cheesecloth.n.01', 'name': 'cheesecloth'}, {'id': 6497, 'synset': 'cheese_cutter.n.01', 'name': 'cheese_cutter'}, {'id': 6498, 'synset': 'cheese_press.n.01', 'name': 'cheese_press'}, {'id': 6499, 'synset': 'chemical_bomb.n.01', 'name': 'chemical_bomb'}, {'id': 6500, 'synset': 'chemical_plant.n.01', 'name': 'chemical_plant'}, {'id': 6501, 'synset': 'chemical_reactor.n.01', 'name': 'chemical_reactor'}, {'id': 6502, 'synset': 'chemise.n.02', 'name': 'chemise'}, {'id': 6503, 'synset': 'chemise.n.01', 'name': 'chemise'}, {'id': 6504, 'synset': 'chenille.n.02', 'name': 'chenille'}, {'id': 6505, 'synset': 'chessman.n.01', 'name': 'chessman'}, {'id': 6506, 'synset': 'chest.n.02', 'name': 'chest'}, {'id': 6507, 'synset': 'chesterfield.n.02', 
'name': 'chesterfield'}, {'id': 6508, 'synset': 'chest_of_drawers.n.01', 'name': 'chest_of_drawers'}, {'id': 6509, 'synset': 'chest_protector.n.01', 'name': 'chest_protector'}, {'id': 6510, 'synset': 'cheval-de-frise.n.01', 'name': 'cheval-de-frise'}, {'id': 6511, 'synset': 'cheval_glass.n.01', 'name': 'cheval_glass'}, {'id': 6512, 'synset': 'chicane.n.02', 'name': 'chicane'}, {'id': 6513, 'synset': 'chicken_coop.n.01', 'name': 'chicken_coop'}, {'id': 6514, 'synset': 'chicken_wire.n.01', 'name': 'chicken_wire'}, {'id': 6515, 'synset': 'chicken_yard.n.01', 'name': 'chicken_yard'}, {'id': 6516, 'synset': 'chiffon.n.01', 'name': 'chiffon'}, {'id': 6517, 'synset': 'chiffonier.n.01', 'name': 'chiffonier'}, {'id': 6518, 'synset': "child's_room.n.01", 'name': "child's_room"}, {'id': 6519, 'synset': 'chimney_breast.n.01', 'name': 'chimney_breast'}, {'id': 6520, 'synset': 'chimney_corner.n.01', 'name': 'chimney_corner'}, {'id': 6521, 'synset': 'china.n.02', 'name': 'china'}, {'id': 6522, 'synset': 'china_cabinet.n.01', 'name': 'china_cabinet'}, {'id': 6523, 'synset': 'chinchilla.n.02', 'name': 'chinchilla'}, {'id': 6524, 'synset': 'chinese_lantern.n.01', 'name': 'Chinese_lantern'}, {'id': 6525, 'synset': 'chinese_puzzle.n.01', 'name': 'Chinese_puzzle'}, {'id': 6526, 'synset': 'chinning_bar.n.01', 'name': 'chinning_bar'}, {'id': 6527, 'synset': 'chino.n.02', 'name': 'chino'}, {'id': 6528, 'synset': 'chino.n.01', 'name': 'chino'}, {'id': 6529, 'synset': 'chin_rest.n.01', 'name': 'chin_rest'}, {'id': 6530, 'synset': 'chin_strap.n.01', 'name': 'chin_strap'}, {'id': 6531, 'synset': 'chintz.n.01', 'name': 'chintz'}, {'id': 6532, 'synset': 'chip.n.07', 'name': 'chip'}, {'id': 6533, 'synset': 'chisel.n.01', 'name': 'chisel'}, {'id': 6534, 'synset': 'chlamys.n.02', 'name': 'chlamys'}, {'id': 6535, 'synset': 'choir.n.03', 'name': 'choir'}, {'id': 6536, 'synset': 'choir_loft.n.01', 'name': 'choir_loft'}, {'id': 6537, 'synset': 'choke.n.02', 'name': 'choke'}, {'id': 6538, 'synset': 'choke.n.01', 'name': 'choke'}, {'id': 6539, 'synset': 'chokey.n.01', 'name': 'chokey'}, {'id': 6540, 'synset': 'choo-choo.n.01', 'name': 'choo-choo'}, {'id': 6541, 'synset': 'chopine.n.01', 'name': 'chopine'}, {'id': 6542, 'synset': 'chordophone.n.01', 'name': 'chordophone'}, {'id': 6543, 'synset': 'christmas_stocking.n.01', 'name': 'Christmas_stocking'}, {'id': 6544, 'synset': 'chronograph.n.01', 'name': 'chronograph'}, {'id': 6545, 'synset': 'chronometer.n.01', 'name': 'chronometer'}, {'id': 6546, 'synset': 'chronoscope.n.01', 'name': 'chronoscope'}, {'id': 6547, 'synset': 'chuck.n.03', 'name': 'chuck'}, {'id': 6548, 'synset': 'chuck_wagon.n.01', 'name': 'chuck_wagon'}, {'id': 6549, 'synset': 'chukka.n.02', 'name': 'chukka'}, {'id': 6550, 'synset': 'church.n.02', 'name': 'church'}, {'id': 6551, 'synset': 'church_bell.n.01', 'name': 'church_bell'}, {'id': 6552, 'synset': 'church_hat.n.01', 'name': 'church_hat'}, {'id': 6553, 'synset': 'church_key.n.01', 'name': 'church_key'}, {'id': 6554, 'synset': 'church_tower.n.01', 'name': 'church_tower'}, {'id': 6555, 'synset': 'churidars.n.01', 'name': 'churidars'}, {'id': 6556, 'synset': 'churn.n.01', 'name': 'churn'}, {'id': 6557, 'synset': 'ciderpress.n.01', 'name': 'ciderpress'}, {'id': 6558, 'synset': 'cigar_band.n.01', 'name': 'cigar_band'}, {'id': 6559, 'synset': 'cigar_cutter.n.01', 'name': 'cigar_cutter'}, {'id': 6560, 'synset': 'cigarette_butt.n.01', 'name': 'cigarette_butt'}, {'id': 6561, 'synset': 'cigarette_holder.n.01', 'name': 'cigarette_holder'}, {'id': 6562, 'synset': 
'cigar_lighter.n.01', 'name': 'cigar_lighter'}, {'id': 6563, 'synset': 'cinch.n.02', 'name': 'cinch'}, {'id': 6564, 'synset': 'cinema.n.02', 'name': 'cinema'}, {'id': 6565, 'synset': 'cinquefoil.n.02', 'name': 'cinquefoil'}, {'id': 6566, 'synset': 'circle.n.08', 'name': 'circle'}, {'id': 6567, 'synset': 'circlet.n.02', 'name': 'circlet'}, {'id': 6568, 'synset': 'circuit.n.01', 'name': 'circuit'}, {'id': 6569, 'synset': 'circuit_board.n.01', 'name': 'circuit_board'}, {'id': 6570, 'synset': 'circuit_breaker.n.01', 'name': 'circuit_breaker'}, {'id': 6571, 'synset': 'circuitry.n.01', 'name': 'circuitry'}, {'id': 6572, 'synset': 'circular_plane.n.01', 'name': 'circular_plane'}, {'id': 6573, 'synset': 'circular_saw.n.01', 'name': 'circular_saw'}, {'id': 6574, 'synset': 'circus_tent.n.01', 'name': 'circus_tent'}, {'id': 6575, 'synset': 'cistern.n.03', 'name': 'cistern'}, {'id': 6576, 'synset': 'cittern.n.01', 'name': 'cittern'}, {'id': 6577, 'synset': 'city_hall.n.01', 'name': 'city_hall'}, {'id': 6578, 'synset': 'cityscape.n.02', 'name': 'cityscape'}, {'id': 6579, 'synset': 'city_university.n.01', 'name': 'city_university'}, {'id': 6580, 'synset': 'civies.n.01', 'name': 'civies'}, {'id': 6581, 'synset': 'civilian_clothing.n.01', 'name': 'civilian_clothing'}, {'id': 6582, 'synset': 'clack_valve.n.01', 'name': 'clack_valve'}, {'id': 6583, 'synset': 'clamp.n.01', 'name': 'clamp'}, {'id': 6584, 'synset': 'clamshell.n.02', 'name': 'clamshell'}, {'id': 6585, 'synset': 'clapper.n.03', 'name': 'clapper'}, {'id': 6586, 'synset': 'clapperboard.n.01', 'name': 'clapperboard'}, {'id': 6587, 'synset': 'clarence.n.01', 'name': 'clarence'}, {'id': 6588, 'synset': 'clark_cell.n.01', 'name': 'Clark_cell'}, {'id': 6589, 'synset': 'clasp_knife.n.01', 'name': 'clasp_knife'}, {'id': 6590, 'synset': 'classroom.n.01', 'name': 'classroom'}, {'id': 6591, 'synset': 'clavichord.n.01', 'name': 'clavichord'}, {'id': 6592, 'synset': 'clavier.n.02', 'name': 'clavier'}, {'id': 6593, 'synset': 'clay_pigeon.n.01', 'name': 'clay_pigeon'}, {'id': 6594, 'synset': 'claymore_mine.n.01', 'name': 'claymore_mine'}, {'id': 6595, 'synset': 'claymore.n.01', 'name': 'claymore'}, {'id': 6596, 'synset': 'cleaners.n.01', 'name': 'cleaners'}, {'id': 6597, 'synset': 'cleaning_implement.n.01', 'name': 'cleaning_implement'}, {'id': 6598, 'synset': 'cleaning_pad.n.01', 'name': 'cleaning_pad'}, {'id': 6599, 'synset': 'clean_room.n.01', 'name': 'clean_room'}, {'id': 6600, 'synset': 'clearway.n.01', 'name': 'clearway'}, {'id': 6601, 'synset': 'cleat.n.01', 'name': 'cleat'}, {'id': 6602, 'synset': 'cleats.n.01', 'name': 'cleats'}, {'id': 6603, 'synset': 'cleaver.n.01', 'name': 'cleaver'}, {'id': 6604, 'synset': 'clerestory.n.01', 'name': 'clerestory'}, {'id': 6605, 'synset': 'clevis.n.01', 'name': 'clevis'}, {'id': 6606, 'synset': 'clews.n.01', 'name': 'clews'}, {'id': 6607, 'synset': 'cliff_dwelling.n.01', 'name': 'cliff_dwelling'}, {'id': 6608, 'synset': 'climbing_frame.n.01', 'name': 'climbing_frame'}, {'id': 6609, 'synset': 'clinch.n.03', 'name': 'clinch'}, {'id': 6610, 'synset': 'clinch.n.02', 'name': 'clinch'}, {'id': 6611, 'synset': 'clincher.n.03', 'name': 'clincher'}, {'id': 6612, 'synset': 'clinic.n.03', 'name': 'clinic'}, {'id': 6613, 'synset': 'clinical_thermometer.n.01', 'name': 'clinical_thermometer'}, {'id': 6614, 'synset': 'clinker.n.02', 'name': 'clinker'}, {'id': 6615, 'synset': 'clinometer.n.01', 'name': 'clinometer'}, {'id': 6616, 'synset': 'clip_lead.n.01', 'name': 'clip_lead'}, {'id': 6617, 'synset': 'clip-on.n.01', 'name': 
'clip-on'}, {'id': 6618, 'synset': 'clipper.n.04', 'name': 'clipper'}, {'id': 6619, 'synset': 'clipper.n.02', 'name': 'clipper'}, {'id': 6620, 'synset': 'cloak.n.01', 'name': 'cloak'}, {'id': 6621, 'synset': 'cloakroom.n.02', 'name': 'cloakroom'}, {'id': 6622, 'synset': 'cloche.n.02', 'name': 'cloche'}, {'id': 6623, 'synset': 'cloche.n.01', 'name': 'cloche'}, {'id': 6624, 'synset': 'clock_pendulum.n.01', 'name': 'clock_pendulum'}, {'id': 6625, 'synset': 'clock_radio.n.01', 'name': 'clock_radio'}, {'id': 6626, 'synset': 'clockwork.n.01', 'name': 'clockwork'}, {'id': 6627, 'synset': 'clog.n.01', 'name': 'clog'}, {'id': 6628, 'synset': 'cloisonne.n.01', 'name': 'cloisonne'}, {'id': 6629, 'synset': 'cloister.n.02', 'name': 'cloister'}, {'id': 6630, 'synset': 'closed_circuit.n.01', 'name': 'closed_circuit'}, {'id': 6631, 'synset': 'closed-circuit_television.n.01', 'name': 'closed-circuit_television'}, {'id': 6632, 'synset': 'closed_loop.n.01', 'name': 'closed_loop'}, {'id': 6633, 'synset': 'closet.n.04', 'name': 'closet'}, {'id': 6634, 'synset': 'closeup_lens.n.01', 'name': 'closeup_lens'}, {'id': 6635, 'synset': 'cloth_cap.n.01', 'name': 'cloth_cap'}, {'id': 6636, 'synset': 'cloth_covering.n.01', 'name': 'cloth_covering'}, {'id': 6637, 'synset': 'clothesbrush.n.01', 'name': 'clothesbrush'}, {'id': 6638, 'synset': 'clothes_closet.n.01', 'name': 'clothes_closet'}, {'id': 6639, 'synset': 'clothes_dryer.n.01', 'name': 'clothes_dryer'}, {'id': 6640, 'synset': 'clotheshorse.n.01', 'name': 'clotheshorse'}, {'id': 6641, 'synset': 'clothes_tree.n.01', 'name': 'clothes_tree'}, {'id': 6642, 'synset': 'clothing.n.01', 'name': 'clothing'}, {'id': 6643, 'synset': 'clothing_store.n.01', 'name': 'clothing_store'}, {'id': 6644, 'synset': 'clout_nail.n.01', 'name': 'clout_nail'}, {'id': 6645, 'synset': 'clove_hitch.n.01', 'name': 'clove_hitch'}, {'id': 6646, 'synset': 'club_car.n.01', 'name': 'club_car'}, {'id': 6647, 'synset': 'clubroom.n.01', 'name': 'clubroom'}, {'id': 6648, 'synset': 'cluster_bomb.n.01', 'name': 'cluster_bomb'}, {'id': 6649, 'synset': 'clutch.n.07', 'name': 'clutch'}, {'id': 6650, 'synset': 'clutch.n.06', 'name': 'clutch'}, {'id': 6651, 'synset': 'coach.n.04', 'name': 'coach'}, {'id': 6652, 'synset': 'coach_house.n.01', 'name': 'coach_house'}, {'id': 6653, 'synset': 'coal_car.n.01', 'name': 'coal_car'}, {'id': 6654, 'synset': 'coal_chute.n.01', 'name': 'coal_chute'}, {'id': 6655, 'synset': 'coal_house.n.01', 'name': 'coal_house'}, {'id': 6656, 'synset': 'coal_shovel.n.01', 'name': 'coal_shovel'}, {'id': 6657, 'synset': 'coaming.n.01', 'name': 'coaming'}, {'id': 6658, 'synset': 'coaster_brake.n.01', 'name': 'coaster_brake'}, {'id': 6659, 'synset': 'coat_button.n.01', 'name': 'coat_button'}, {'id': 6660, 'synset': 'coat_closet.n.01', 'name': 'coat_closet'}, {'id': 6661, 'synset': 'coatdress.n.01', 'name': 'coatdress'}, {'id': 6662, 'synset': 'coatee.n.01', 'name': 'coatee'}, {'id': 6663, 'synset': 'coating.n.01', 'name': 'coating'}, {'id': 6664, 'synset': 'coating.n.03', 'name': 'coating'}, {'id': 6665, 'synset': 'coat_of_paint.n.01', 'name': 'coat_of_paint'}, {'id': 6666, 'synset': 'coattail.n.01', 'name': 'coattail'}, {'id': 6667, 'synset': 'coaxial_cable.n.01', 'name': 'coaxial_cable'}, {'id': 6668, 'synset': 'cobweb.n.03', 'name': 'cobweb'}, {'id': 6669, 'synset': 'cobweb.n.01', 'name': 'cobweb'}, {'id': 6670, 'synset': 'cockcroft_and_walton_accelerator.n.01', 'name': 'Cockcroft_and_Walton_accelerator'}, {'id': 6671, 'synset': 'cocked_hat.n.01', 'name': 'cocked_hat'}, {'id': 6672, 
'synset': 'cockhorse.n.01', 'name': 'cockhorse'}, {'id': 6673, 'synset': 'cockleshell.n.01', 'name': 'cockleshell'}, {'id': 6674, 'synset': 'cockpit.n.01', 'name': 'cockpit'}, {'id': 6675, 'synset': 'cockpit.n.03', 'name': 'cockpit'}, {'id': 6676, 'synset': 'cockpit.n.02', 'name': 'cockpit'}, {'id': 6677, 'synset': 'cockscomb.n.03', 'name': 'cockscomb'}, {'id': 6678, 'synset': 'cocktail_dress.n.01', 'name': 'cocktail_dress'}, {'id': 6679, 'synset': 'cocktail_lounge.n.01', 'name': 'cocktail_lounge'}, {'id': 6680, 'synset': 'cocktail_shaker.n.01', 'name': 'cocktail_shaker'}, {'id': 6681, 'synset': 'cocotte.n.02', 'name': 'cocotte'}, {'id': 6682, 'synset': 'codpiece.n.01', 'name': 'codpiece'}, {'id': 6683, 'synset': 'coelostat.n.01', 'name': 'coelostat'}, {'id': 6684, 'synset': 'coffee_can.n.01', 'name': 'coffee_can'}, {'id': 6685, 'synset': 'coffee_cup.n.01', 'name': 'coffee_cup'}, {'id': 6686, 'synset': 'coffee_filter.n.01', 'name': 'coffee_filter'}, {'id': 6687, 'synset': 'coffee_mill.n.01', 'name': 'coffee_mill'}, {'id': 6688, 'synset': 'coffee_mug.n.01', 'name': 'coffee_mug'}, {'id': 6689, 'synset': 'coffee_stall.n.01', 'name': 'coffee_stall'}, {'id': 6690, 'synset': 'coffee_urn.n.01', 'name': 'coffee_urn'}, {'id': 6691, 'synset': 'coffer.n.02', 'name': 'coffer'}, {'id': 6692, 'synset': 'coffey_still.n.01', 'name': 'Coffey_still'}, {'id': 6693, 'synset': 'coffin.n.01', 'name': 'coffin'}, {'id': 6694, 'synset': 'cog.n.02', 'name': 'cog'}, {'id': 6695, 'synset': 'coif.n.02', 'name': 'coif'}, {'id': 6696, 'synset': 'coil.n.01', 'name': 'coil'}, {'id': 6697, 'synset': 'coil.n.06', 'name': 'coil'}, {'id': 6698, 'synset': 'coil.n.03', 'name': 'coil'}, {'id': 6699, 'synset': 'coil_spring.n.01', 'name': 'coil_spring'}, {'id': 6700, 'synset': 'coin_box.n.01', 'name': 'coin_box'}, {'id': 6701, 'synset': 'cold_cathode.n.01', 'name': 'cold_cathode'}, {'id': 6702, 'synset': 'cold_chisel.n.01', 'name': 'cold_chisel'}, {'id': 6703, 'synset': 'cold_cream.n.01', 'name': 'cold_cream'}, {'id': 6704, 'synset': 'cold_frame.n.01', 'name': 'cold_frame'}, {'id': 6705, 'synset': 'collar.n.01', 'name': 'collar'}, {'id': 6706, 'synset': 'collar.n.03', 'name': 'collar'}, {'id': 6707, 'synset': 'college.n.03', 'name': 'college'}, {'id': 6708, 'synset': 'collet.n.02', 'name': 'collet'}, {'id': 6709, 'synset': 'collider.n.01', 'name': 'collider'}, {'id': 6710, 'synset': 'colliery.n.01', 'name': 'colliery'}, {'id': 6711, 'synset': 'collimator.n.02', 'name': 'collimator'}, {'id': 6712, 'synset': 'collimator.n.01', 'name': 'collimator'}, {'id': 6713, 'synset': 'cologne.n.02', 'name': 'cologne'}, {'id': 6714, 'synset': 'colonnade.n.01', 'name': 'colonnade'}, {'id': 6715, 'synset': 'colonoscope.n.01', 'name': 'colonoscope'}, {'id': 6716, 'synset': 'colorimeter.n.01', 'name': 'colorimeter'}, {'id': 6717, 'synset': 'colors.n.02', 'name': 'colors'}, {'id': 6718, 'synset': 'color_television.n.01', 'name': 'color_television'}, {'id': 6719, 'synset': 'color_tube.n.01', 'name': 'color_tube'}, {'id': 6720, 'synset': 'color_wash.n.01', 'name': 'color_wash'}, {'id': 6721, 'synset': 'colt.n.02', 'name': 'Colt'}, {'id': 6722, 'synset': 'colter.n.01', 'name': 'colter'}, {'id': 6723, 'synset': 'columbarium.n.03', 'name': 'columbarium'}, {'id': 6724, 'synset': 'columbarium.n.02', 'name': 'columbarium'}, {'id': 6725, 'synset': 'column.n.07', 'name': 'column'}, {'id': 6726, 'synset': 'column.n.06', 'name': 'column'}, {'id': 6727, 'synset': 'comb.n.01', 'name': 'comb'}, {'id': 6728, 'synset': 'comb.n.03', 'name': 'comb'}, {'id': 6729, 
'synset': 'comber.n.03', 'name': 'comber'}, {'id': 6730, 'synset': 'combination_plane.n.01', 'name': 'combination_plane'}, {'id': 6731, 'synset': 'combine.n.01', 'name': 'combine'}, {'id': 6732, 'synset': 'command_module.n.01', 'name': 'command_module'}, {'id': 6733, 'synset': 'commissary.n.01', 'name': 'commissary'}, {'id': 6734, 'synset': 'commissary.n.02', 'name': 'commissary'}, {'id': 6735, 'synset': 'commodity.n.01', 'name': 'commodity'}, {'id': 6736, 'synset': 'common_ax.n.01', 'name': 'common_ax'}, {'id': 6737, 'synset': 'common_room.n.01', 'name': 'common_room'}, {'id': 6738, 'synset': 'communications_satellite.n.01', 'name': 'communications_satellite'}, {'id': 6739, 'synset': 'communication_system.n.01', 'name': 'communication_system'}, {'id': 6740, 'synset': 'community_center.n.01', 'name': 'community_center'}, {'id': 6741, 'synset': 'commutator.n.01', 'name': 'commutator'}, {'id': 6742, 'synset': 'commuter.n.01', 'name': 'commuter'}, {'id': 6743, 'synset': 'compact.n.01', 'name': 'compact'}, {'id': 6744, 'synset': 'compact.n.03', 'name': 'compact'}, {'id': 6745, 'synset': 'compact_disk.n.01', 'name': 'compact_disk'}, {'id': 6746, 'synset': 'compact-disk_burner.n.01', 'name': 'compact-disk_burner'}, {'id': 6747, 'synset': 'companionway.n.01', 'name': 'companionway'}, {'id': 6748, 'synset': 'compartment.n.02', 'name': 'compartment'}, {'id': 6749, 'synset': 'compartment.n.01', 'name': 'compartment'}, {'id': 6750, 'synset': 'compass.n.04', 'name': 'compass'}, {'id': 6751, 'synset': 'compass_card.n.01', 'name': 'compass_card'}, {'id': 6752, 'synset': 'compass_saw.n.01', 'name': 'compass_saw'}, {'id': 6753, 'synset': 'compound.n.03', 'name': 'compound'}, {'id': 6754, 'synset': 'compound_lens.n.01', 'name': 'compound_lens'}, {'id': 6755, 'synset': 'compound_lever.n.01', 'name': 'compound_lever'}, {'id': 6756, 'synset': 'compound_microscope.n.01', 'name': 'compound_microscope'}, {'id': 6757, 'synset': 'compress.n.01', 'name': 'compress'}, {'id': 6758, 'synset': 'compression_bandage.n.01', 'name': 'compression_bandage'}, {'id': 6759, 'synset': 'compressor.n.01', 'name': 'compressor'}, {'id': 6760, 'synset': 'computer.n.01', 'name': 'computer'}, {'id': 6761, 'synset': 'computer_circuit.n.01', 'name': 'computer_circuit'}, {'id': 6762, 'synset': 'computerized_axial_tomography_scanner.n.01', 'name': 'computerized_axial_tomography_scanner'}, {'id': 6763, 'synset': 'computer_monitor.n.01', 'name': 'computer_monitor'}, {'id': 6764, 'synset': 'computer_network.n.01', 'name': 'computer_network'}, {'id': 6765, 'synset': 'computer_screen.n.01', 'name': 'computer_screen'}, {'id': 6766, 'synset': 'computer_store.n.01', 'name': 'computer_store'}, {'id': 6767, 'synset': 'computer_system.n.01', 'name': 'computer_system'}, {'id': 6768, 'synset': 'concentration_camp.n.01', 'name': 'concentration_camp'}, {'id': 6769, 'synset': 'concert_grand.n.01', 'name': 'concert_grand'}, {'id': 6770, 'synset': 'concert_hall.n.01', 'name': 'concert_hall'}, {'id': 6771, 'synset': 'concertina.n.02', 'name': 'concertina'}, {'id': 6772, 'synset': 'concertina.n.01', 'name': 'concertina'}, {'id': 6773, 'synset': 'concrete_mixer.n.01', 'name': 'concrete_mixer'}, {'id': 6774, 'synset': 'condensation_pump.n.01', 'name': 'condensation_pump'}, {'id': 6775, 'synset': 'condenser.n.04', 'name': 'condenser'}, {'id': 6776, 'synset': 'condenser.n.03', 'name': 'condenser'}, {'id': 6777, 'synset': 'condenser.n.02', 'name': 'condenser'}, {'id': 6778, 'synset': 'condenser_microphone.n.01', 'name': 'condenser_microphone'}, {'id': 6779, 
'synset': 'condominium.n.02', 'name': 'condominium'}, {'id': 6780, 'synset': 'condominium.n.01', 'name': 'condominium'}, {'id': 6781, 'synset': 'conductor.n.04', 'name': 'conductor'}, {'id': 6782, 'synset': 'cone_clutch.n.01', 'name': 'cone_clutch'}, {'id': 6783, 'synset': 'confectionery.n.02', 'name': 'confectionery'}, {'id': 6784, 'synset': 'conference_center.n.01', 'name': 'conference_center'}, {'id': 6785, 'synset': 'conference_room.n.01', 'name': 'conference_room'}, {'id': 6786, 'synset': 'conference_table.n.01', 'name': 'conference_table'}, {'id': 6787, 'synset': 'confessional.n.01', 'name': 'confessional'}, {'id': 6788, 'synset': 'conformal_projection.n.01', 'name': 'conformal_projection'}, {'id': 6789, 'synset': 'congress_boot.n.01', 'name': 'congress_boot'}, {'id': 6790, 'synset': 'conic_projection.n.01', 'name': 'conic_projection'}, {'id': 6791, 'synset': 'connecting_rod.n.01', 'name': 'connecting_rod'}, {'id': 6792, 'synset': 'connecting_room.n.01', 'name': 'connecting_room'}, {'id': 6793, 'synset': 'connection.n.03', 'name': 'connection'}, {'id': 6794, 'synset': 'conning_tower.n.02', 'name': 'conning_tower'}, {'id': 6795, 'synset': 'conning_tower.n.01', 'name': 'conning_tower'}, {'id': 6796, 'synset': 'conservatory.n.03', 'name': 'conservatory'}, {'id': 6797, 'synset': 'conservatory.n.02', 'name': 'conservatory'}, {'id': 6798, 'synset': 'console.n.03', 'name': 'console'}, {'id': 6799, 'synset': 'console.n.02', 'name': 'console'}, {'id': 6800, 'synset': 'console_table.n.01', 'name': 'console_table'}, {'id': 6801, 'synset': 'consulate.n.01', 'name': 'consulate'}, {'id': 6802, 'synset': 'contact.n.07', 'name': 'contact'}, {'id': 6803, 'synset': 'contact.n.09', 'name': 'contact'}, {'id': 6804, 'synset': 'container.n.01', 'name': 'container'}, {'id': 6805, 'synset': 'container_ship.n.01', 'name': 'container_ship'}, {'id': 6806, 'synset': 'containment.n.02', 'name': 'containment'}, {'id': 6807, 'synset': 'contrabassoon.n.01', 'name': 'contrabassoon'}, {'id': 6808, 'synset': 'control_center.n.01', 'name': 'control_center'}, {'id': 6809, 'synset': 'control_circuit.n.01', 'name': 'control_circuit'}, {'id': 6810, 'synset': 'control_key.n.01', 'name': 'control_key'}, {'id': 6811, 'synset': 'control_panel.n.01', 'name': 'control_panel'}, {'id': 6812, 'synset': 'control_rod.n.01', 'name': 'control_rod'}, {'id': 6813, 'synset': 'control_room.n.01', 'name': 'control_room'}, {'id': 6814, 'synset': 'control_system.n.01', 'name': 'control_system'}, {'id': 6815, 'synset': 'control_tower.n.01', 'name': 'control_tower'}, {'id': 6816, 'synset': 'convector.n.01', 'name': 'convector'}, {'id': 6817, 'synset': 'convenience_store.n.01', 'name': 'convenience_store'}, {'id': 6818, 'synset': 'convent.n.01', 'name': 'convent'}, {'id': 6819, 'synset': 'conventicle.n.02', 'name': 'conventicle'}, {'id': 6820, 'synset': 'converging_lens.n.01', 'name': 'converging_lens'}, {'id': 6821, 'synset': 'converter.n.01', 'name': 'converter'}, {'id': 6822, 'synset': 'conveyance.n.03', 'name': 'conveyance'}, {'id': 6823, 'synset': 'conveyer_belt.n.01', 'name': 'conveyer_belt'}, {'id': 6824, 'synset': 'cookfire.n.01', 'name': 'cookfire'}, {'id': 6825, 'synset': 'cookhouse.n.02', 'name': 'cookhouse'}, {'id': 6826, 'synset': 'cookie_cutter.n.01', 'name': 'cookie_cutter'}, {'id': 6827, 'synset': 'cookie_jar.n.01', 'name': 'cookie_jar'}, {'id': 6828, 'synset': 'cookie_sheet.n.01', 'name': 'cookie_sheet'}, {'id': 6829, 'synset': 'cookstove.n.01', 'name': 'cookstove'}, {'id': 6830, 'synset': 'coolant_system.n.01', 'name': 
'coolant_system'}, {'id': 6831, 'synset': 'cooling_system.n.02', 'name': 'cooling_system'}, {'id': 6832, 'synset': 'cooling_system.n.01', 'name': 'cooling_system'}, {'id': 6833, 'synset': 'cooling_tower.n.01', 'name': 'cooling_tower'}, {'id': 6834, 'synset': 'coonskin_cap.n.01', 'name': 'coonskin_cap'}, {'id': 6835, 'synset': 'cope.n.02', 'name': 'cope'}, {'id': 6836, 'synset': 'coping_saw.n.01', 'name': 'coping_saw'}, {'id': 6837, 'synset': 'copperware.n.01', 'name': 'copperware'}, {'id': 6838, 'synset': 'copyholder.n.01', 'name': 'copyholder'}, {'id': 6839, 'synset': 'coquille.n.02', 'name': 'coquille'}, {'id': 6840, 'synset': 'coracle.n.01', 'name': 'coracle'}, {'id': 6841, 'synset': 'corbel.n.01', 'name': 'corbel'}, {'id': 6842, 'synset': 'corbel_arch.n.01', 'name': 'corbel_arch'}, {'id': 6843, 'synset': 'corbel_step.n.01', 'name': 'corbel_step'}, {'id': 6844, 'synset': 'corbie_gable.n.01', 'name': 'corbie_gable'}, {'id': 6845, 'synset': 'cord.n.04', 'name': 'cord'}, {'id': 6846, 'synset': 'cord.n.03', 'name': 'cord'}, {'id': 6847, 'synset': 'cordage.n.02', 'name': 'cordage'}, {'id': 6848, 'synset': 'cords.n.01', 'name': 'cords'}, {'id': 6849, 'synset': 'core.n.10', 'name': 'core'}, {'id': 6850, 'synset': 'core_bit.n.01', 'name': 'core_bit'}, {'id': 6851, 'synset': 'core_drill.n.01', 'name': 'core_drill'}, {'id': 6852, 'synset': 'corer.n.01', 'name': 'corer'}, {'id': 6853, 'synset': 'corker.n.02', 'name': 'corker'}, {'id': 6854, 'synset': 'corncrib.n.01', 'name': 'corncrib'}, {'id': 6855, 'synset': 'corner.n.11', 'name': 'corner'}, {'id': 6856, 'synset': 'corner.n.03', 'name': 'corner'}, {'id': 6857, 'synset': 'corner_post.n.01', 'name': 'corner_post'}, {'id': 6858, 'synset': 'cornice.n.03', 'name': 'cornice'}, {'id': 6859, 'synset': 'cornice.n.02', 'name': 'cornice'}, {'id': 6860, 'synset': 'correctional_institution.n.01', 'name': 'correctional_institution'}, {'id': 6861, 'synset': 'corrugated_fastener.n.01', 'name': 'corrugated_fastener'}, {'id': 6862, 'synset': 'corselet.n.01', 'name': 'corselet'}, {'id': 6863, 'synset': 'cosmetic.n.01', 'name': 'cosmetic'}, {'id': 6864, 'synset': 'cosmotron.n.01', 'name': 'cosmotron'}, {'id': 6865, 'synset': 'costume.n.01', 'name': 'costume'}, {'id': 6866, 'synset': 'costume.n.02', 'name': 'costume'}, {'id': 6867, 'synset': 'costume.n.03', 'name': 'costume'}, {'id': 6868, 'synset': 'cosy.n.01', 'name': 'cosy'}, {'id': 6869, 'synset': 'cot.n.03', 'name': 'cot'}, {'id': 6870, 'synset': 'cottage_tent.n.01', 'name': 'cottage_tent'}, {'id': 6871, 'synset': 'cotter.n.03', 'name': 'cotter'}, {'id': 6872, 'synset': 'cotter_pin.n.01', 'name': 'cotter_pin'}, {'id': 6873, 'synset': 'cotton.n.02', 'name': 'cotton'}, {'id': 6874, 'synset': 'cotton_flannel.n.01', 'name': 'cotton_flannel'}, {'id': 6875, 'synset': 'cotton_mill.n.01', 'name': 'cotton_mill'}, {'id': 6876, 'synset': 'couch.n.03', 'name': 'couch'}, {'id': 6877, 'synset': 'couch.n.02', 'name': 'couch'}, {'id': 6878, 'synset': 'couchette.n.01', 'name': 'couchette'}, {'id': 6879, 'synset': 'coude_telescope.n.01', 'name': 'coude_telescope'}, {'id': 6880, 'synset': 'counter.n.01', 'name': 'counter'}, {'id': 6881, 'synset': 'counter.n.03', 'name': 'counter'}, {'id': 6882, 'synset': 'counter.n.02', 'name': 'counter'}, {'id': 6883, 'synset': 'counterbore.n.01', 'name': 'counterbore'}, {'id': 6884, 'synset': 'counter_tube.n.01', 'name': 'counter_tube'}, {'id': 6885, 'synset': 'country_house.n.01', 'name': 'country_house'}, {'id': 6886, 'synset': 'country_store.n.01', 'name': 'country_store'}, {'id': 6887, 
'synset': 'coupe.n.01', 'name': 'coupe'}, {'id': 6888, 'synset': 'coupling.n.02', 'name': 'coupling'}, {'id': 6889, 'synset': 'court.n.10', 'name': 'court'}, {'id': 6890, 'synset': 'court.n.04', 'name': 'court'}, {'id': 6891, 'synset': 'court.n.02', 'name': 'court'}, {'id': 6892, 'synset': 'court.n.09', 'name': 'court'}, {'id': 6893, 'synset': 'courtelle.n.01', 'name': 'Courtelle'}, {'id': 6894, 'synset': 'courthouse.n.02', 'name': 'courthouse'}, {'id': 6895, 'synset': 'courthouse.n.01', 'name': 'courthouse'}, {'id': 6896, 'synset': 'covered_bridge.n.01', 'name': 'covered_bridge'}, {'id': 6897, 'synset': 'covered_couch.n.01', 'name': 'covered_couch'}, {'id': 6898, 'synset': 'covered_wagon.n.01', 'name': 'covered_wagon'}, {'id': 6899, 'synset': 'covering.n.02', 'name': 'covering'}, {'id': 6900, 'synset': 'coverlet.n.01', 'name': 'coverlet'}, {'id': 6901, 'synset': 'cover_plate.n.01', 'name': 'cover_plate'}, {'id': 6902, 'synset': 'cowbarn.n.01', 'name': 'cowbarn'}, {'id': 6903, 'synset': 'cowboy_boot.n.01', 'name': 'cowboy_boot'}, {'id': 6904, 'synset': 'cowhide.n.03', 'name': 'cowhide'}, {'id': 6905, 'synset': 'cowl.n.02', 'name': 'cowl'}, {'id': 6906, 'synset': 'cow_pen.n.01', 'name': 'cow_pen'}, {'id': 6907, 'synset': 'cpu_board.n.01', 'name': 'CPU_board'}, {'id': 6908, 'synset': 'crackle.n.02', 'name': 'crackle'}, {'id': 6909, 'synset': 'cradle.n.01', 'name': 'cradle'}, {'id': 6910, 'synset': 'craft.n.02', 'name': 'craft'}, {'id': 6911, 'synset': 'cramp.n.03', 'name': 'cramp'}, {'id': 6912, 'synset': 'crampon.n.02', 'name': 'crampon'}, {'id': 6913, 'synset': 'crampon.n.01', 'name': 'crampon'}, {'id': 6914, 'synset': 'crane.n.04', 'name': 'crane'}, {'id': 6915, 'synset': 'craniometer.n.01', 'name': 'craniometer'}, {'id': 6916, 'synset': 'crank.n.04', 'name': 'crank'}, {'id': 6917, 'synset': 'crankcase.n.01', 'name': 'crankcase'}, {'id': 6918, 'synset': 'crankshaft.n.01', 'name': 'crankshaft'}, {'id': 6919, 'synset': 'crash_barrier.n.01', 'name': 'crash_barrier'}, {'id': 6920, 'synset': 'crash_helmet.n.01', 'name': 'crash_helmet'}, {'id': 6921, 'synset': 'cravat.n.01', 'name': 'cravat'}, {'id': 6922, 'synset': 'crazy_quilt.n.01', 'name': 'crazy_quilt'}, {'id': 6923, 'synset': 'cream.n.03', 'name': 'cream'}, {'id': 6924, 'synset': 'creche.n.01', 'name': 'creche'}, {'id': 6925, 'synset': 'creche.n.02', 'name': 'creche'}, {'id': 6926, 'synset': 'credenza.n.01', 'name': 'credenza'}, {'id': 6927, 'synset': 'creel.n.01', 'name': 'creel'}, {'id': 6928, 'synset': 'crematory.n.02', 'name': 'crematory'}, {'id': 6929, 'synset': 'crematory.n.01', 'name': 'crematory'}, {'id': 6930, 'synset': 'crepe.n.03', 'name': 'crepe'}, {'id': 6931, 'synset': 'crepe_de_chine.n.01', 'name': 'crepe_de_Chine'}, {'id': 6932, 'synset': 'crescent_wrench.n.01', 'name': 'crescent_wrench'}, {'id': 6933, 'synset': 'cretonne.n.01', 'name': 'cretonne'}, {'id': 6934, 'synset': 'crib.n.03', 'name': 'crib'}, {'id': 6935, 'synset': 'cricket_ball.n.01', 'name': 'cricket_ball'}, {'id': 6936, 'synset': 'cricket_bat.n.01', 'name': 'cricket_bat'}, {'id': 6937, 'synset': 'cricket_equipment.n.01', 'name': 'cricket_equipment'}, {'id': 6938, 'synset': 'cringle.n.01', 'name': 'cringle'}, {'id': 6939, 'synset': 'crinoline.n.03', 'name': 'crinoline'}, {'id': 6940, 'synset': 'crinoline.n.02', 'name': 'crinoline'}, {'id': 6941, 'synset': 'crochet_needle.n.01', 'name': 'crochet_needle'}, {'id': 6942, 'synset': 'crock_pot.n.01', 'name': 'Crock_Pot'}, {'id': 6943, 'synset': 'crook.n.03', 'name': 'crook'}, {'id': 6944, 'synset': 
'crookes_radiometer.n.01', 'name': 'Crookes_radiometer'}, {'id': 6945, 'synset': 'crookes_tube.n.01', 'name': 'Crookes_tube'}, {'id': 6946, 'synset': 'croquet_ball.n.01', 'name': 'croquet_ball'}, {'id': 6947, 'synset': 'croquet_equipment.n.01', 'name': 'croquet_equipment'}, {'id': 6948, 'synset': 'croquet_mallet.n.01', 'name': 'croquet_mallet'}, {'id': 6949, 'synset': 'cross.n.01', 'name': 'cross'}, {'id': 6950, 'synset': 'crossbar.n.03', 'name': 'crossbar'}, {'id': 6951, 'synset': 'crossbar.n.02', 'name': 'crossbar'}, {'id': 6952, 'synset': 'crossbench.n.01', 'name': 'crossbench'}, {'id': 6953, 'synset': 'cross_bit.n.01', 'name': 'cross_bit'}, {'id': 6954, 'synset': 'crossbow.n.01', 'name': 'crossbow'}, {'id': 6955, 'synset': 'crosscut_saw.n.01', 'name': 'crosscut_saw'}, {'id': 6956, 'synset': 'crossjack.n.01', 'name': 'crossjack'}, {'id': 6957, 'synset': 'crosspiece.n.02', 'name': 'crosspiece'}, {'id': 6958, 'synset': 'crotchet.n.04', 'name': 'crotchet'}, {'id': 6959, 'synset': "croupier's_rake.n.01", 'name': "croupier's_rake"}, {'id': 6960, 'synset': 'crown.n.11', 'name': 'crown'}, {'id': 6961, 'synset': 'crown_jewels.n.01', 'name': 'crown_jewels'}, {'id': 6962, 'synset': 'crown_lens.n.01', 'name': 'crown_lens'}, {'id': 6963, 'synset': "crow's_nest.n.01", 'name': "crow's_nest"}, {'id': 6964, 'synset': 'crucible.n.01', 'name': 'crucible'}, {'id': 6965, 'synset': 'cruet.n.01', 'name': 'cruet'}, {'id': 6966, 'synset': 'cruet-stand.n.01', 'name': 'cruet-stand'}, {'id': 6967, 'synset': 'cruise_control.n.01', 'name': 'cruise_control'}, {'id': 6968, 'synset': 'cruise_missile.n.01', 'name': 'cruise_missile'}, {'id': 6969, 'synset': 'cruiser.n.02', 'name': 'cruiser'}, {'id': 6970, 'synset': 'crupper.n.01', 'name': 'crupper'}, {'id': 6971, 'synset': 'cruse.n.01', 'name': 'cruse'}, {'id': 6972, 'synset': 'crusher.n.01', 'name': 'crusher'}, {'id': 6973, 'synset': 'cryometer.n.01', 'name': 'cryometer'}, {'id': 6974, 'synset': 'cryoscope.n.01', 'name': 'cryoscope'}, {'id': 6975, 'synset': 'cryostat.n.01', 'name': 'cryostat'}, {'id': 6976, 'synset': 'crypt.n.01', 'name': 'crypt'}, {'id': 6977, 'synset': 'crystal.n.06', 'name': 'crystal'}, {'id': 6978, 'synset': 'crystal_detector.n.01', 'name': 'crystal_detector'}, {'id': 6979, 'synset': 'crystal_microphone.n.01', 'name': 'crystal_microphone'}, {'id': 6980, 'synset': 'crystal_oscillator.n.01', 'name': 'crystal_oscillator'}, {'id': 6981, 'synset': 'crystal_set.n.01', 'name': 'crystal_set'}, {'id': 6982, 'synset': 'cubitiere.n.01', 'name': 'cubitiere'}, {'id': 6983, 'synset': 'cucking_stool.n.01', 'name': 'cucking_stool'}, {'id': 6984, 'synset': 'cuckoo_clock.n.01', 'name': 'cuckoo_clock'}, {'id': 6985, 'synset': 'cuddy.n.01', 'name': 'cuddy'}, {'id': 6986, 'synset': 'cudgel.n.01', 'name': 'cudgel'}, {'id': 6987, 'synset': 'cue.n.04', 'name': 'cue'}, {'id': 6988, 'synset': 'cue_ball.n.01', 'name': 'cue_ball'}, {'id': 6989, 'synset': 'cuff.n.01', 'name': 'cuff'}, {'id': 6990, 'synset': 'cuirass.n.01', 'name': 'cuirass'}, {'id': 6991, 'synset': 'cuisse.n.01', 'name': 'cuisse'}, {'id': 6992, 'synset': 'cul.n.01', 'name': 'cul'}, {'id': 6993, 'synset': 'culdoscope.n.01', 'name': 'culdoscope'}, {'id': 6994, 'synset': 'cullis.n.01', 'name': 'cullis'}, {'id': 6995, 'synset': 'culotte.n.01', 'name': 'culotte'}, {'id': 6996, 'synset': 'cultivator.n.02', 'name': 'cultivator'}, {'id': 6997, 'synset': 'culverin.n.02', 'name': 'culverin'}, {'id': 6998, 'synset': 'culverin.n.01', 'name': 'culverin'}, {'id': 6999, 'synset': 'culvert.n.01', 'name': 'culvert'}, {'id': 
7000, 'synset': 'cup_hook.n.01', 'name': 'cup_hook'}, {'id': 7001, 'synset': 'cupola.n.02', 'name': 'cupola'}, {'id': 7002, 'synset': 'cupola.n.01', 'name': 'cupola'}, {'id': 7003, 'synset': 'curb.n.02', 'name': 'curb'}, {'id': 7004, 'synset': 'curb_roof.n.01', 'name': 'curb_roof'}, {'id': 7005, 'synset': 'curbstone.n.01', 'name': 'curbstone'}, {'id': 7006, 'synset': 'curette.n.01', 'name': 'curette'}, {'id': 7007, 'synset': 'currycomb.n.01', 'name': 'currycomb'}, {'id': 7008, 'synset': 'cursor.n.01', 'name': 'cursor'}, {'id': 7009, 'synset': 'customhouse.n.01', 'name': 'customhouse'}, {'id': 7010, 'synset': 'cutaway.n.01', 'name': 'cutaway'}, {'id': 7011, 'synset': 'cutlas.n.01', 'name': 'cutlas'}, {'id': 7012, 'synset': 'cutoff.n.03', 'name': 'cutoff'}, {'id': 7013, 'synset': 'cutout.n.01', 'name': 'cutout'}, {'id': 7014, 'synset': 'cutter.n.06', 'name': 'cutter'}, {'id': 7015, 'synset': 'cutter.n.05', 'name': 'cutter'}, {'id': 7016, 'synset': 'cutting_implement.n.01', 'name': 'cutting_implement'}, {'id': 7017, 'synset': 'cutting_room.n.01', 'name': 'cutting_room'}, {'id': 7018, 'synset': 'cutty_stool.n.01', 'name': 'cutty_stool'}, {'id': 7019, 'synset': 'cutwork.n.01', 'name': 'cutwork'}, {'id': 7020, 'synset': 'cybercafe.n.01', 'name': 'cybercafe'}, {'id': 7021, 'synset': 'cyclopean_masonry.n.01', 'name': 'cyclopean_masonry'}, {'id': 7022, 'synset': 'cyclostyle.n.01', 'name': 'cyclostyle'}, {'id': 7023, 'synset': 'cyclotron.n.01', 'name': 'cyclotron'}, {'id': 7024, 'synset': 'cylinder.n.03', 'name': 'cylinder'}, {'id': 7025, 'synset': 'cylinder_lock.n.01', 'name': 'cylinder_lock'}, {'id': 7026, 'synset': 'dacha.n.01', 'name': 'dacha'}, {'id': 7027, 'synset': 'dacron.n.01', 'name': 'Dacron'}, {'id': 7028, 'synset': 'dado.n.02', 'name': 'dado'}, {'id': 7029, 'synset': 'dado_plane.n.01', 'name': 'dado_plane'}, {'id': 7030, 'synset': 'dairy.n.01', 'name': 'dairy'}, {'id': 7031, 'synset': 'dais.n.01', 'name': 'dais'}, {'id': 7032, 'synset': 'daisy_print_wheel.n.01', 'name': 'daisy_print_wheel'}, {'id': 7033, 'synset': 'daisywheel_printer.n.01', 'name': 'daisywheel_printer'}, {'id': 7034, 'synset': 'dam.n.01', 'name': 'dam'}, {'id': 7035, 'synset': 'damask.n.02', 'name': 'damask'}, {'id': 7036, 'synset': 'dampener.n.01', 'name': 'dampener'}, {'id': 7037, 'synset': 'damper.n.02', 'name': 'damper'}, {'id': 7038, 'synset': 'damper_block.n.01', 'name': 'damper_block'}, {'id': 7039, 'synset': 'dark_lantern.n.01', 'name': 'dark_lantern'}, {'id': 7040, 'synset': 'darkroom.n.01', 'name': 'darkroom'}, {'id': 7041, 'synset': 'darning_needle.n.01', 'name': 'darning_needle'}, {'id': 7042, 'synset': 'dart.n.02', 'name': 'dart'}, {'id': 7043, 'synset': 'dart.n.01', 'name': 'dart'}, {'id': 7044, 'synset': 'dashboard.n.02', 'name': 'dashboard'}, {'id': 7045, 'synset': 'dashiki.n.01', 'name': 'dashiki'}, {'id': 7046, 'synset': 'dash-pot.n.01', 'name': 'dash-pot'}, {'id': 7047, 'synset': 'data_converter.n.01', 'name': 'data_converter'}, {'id': 7048, 'synset': 'data_input_device.n.01', 'name': 'data_input_device'}, {'id': 7049, 'synset': 'data_multiplexer.n.01', 'name': 'data_multiplexer'}, {'id': 7050, 'synset': 'data_system.n.01', 'name': 'data_system'}, {'id': 7051, 'synset': 'davenport.n.03', 'name': 'davenport'}, {'id': 7052, 'synset': 'davenport.n.02', 'name': 'davenport'}, {'id': 7053, 'synset': 'davit.n.01', 'name': 'davit'}, {'id': 7054, 'synset': 'daybed.n.01', 'name': 'daybed'}, {'id': 7055, 'synset': 'daybook.n.02', 'name': 'daybook'}, {'id': 7056, 'synset': 'day_nursery.n.01', 'name': 
'day_nursery'}, {'id': 7057, 'synset': 'day_school.n.03', 'name': 'day_school'}, {'id': 7058, 'synset': 'dead_axle.n.01', 'name': 'dead_axle'}, {'id': 7059, 'synset': 'deadeye.n.02', 'name': 'deadeye'}, {'id': 7060, 'synset': 'deadhead.n.02', 'name': 'deadhead'}, {'id': 7061, 'synset': 'deanery.n.01', 'name': 'deanery'}, {'id': 7062, 'synset': 'deathbed.n.02', 'name': 'deathbed'}, {'id': 7063, 'synset': 'death_camp.n.01', 'name': 'death_camp'}, {'id': 7064, 'synset': 'death_house.n.01', 'name': 'death_house'}, {'id': 7065, 'synset': 'death_knell.n.02', 'name': 'death_knell'}, {'id': 7066, 'synset': 'death_seat.n.01', 'name': 'death_seat'}, {'id': 7067, 'synset': 'deck.n.02', 'name': 'deck'}, {'id': 7068, 'synset': 'deck.n.04', 'name': 'deck'}, {'id': 7069, 'synset': 'deck-house.n.01', 'name': 'deck-house'}, {'id': 7070, 'synset': 'deckle.n.02', 'name': 'deckle'}, {'id': 7071, 'synset': 'deckle_edge.n.01', 'name': 'deckle_edge'}, {'id': 7072, 'synset': 'declinometer.n.01', 'name': 'declinometer'}, {'id': 7073, 'synset': 'decoder.n.02', 'name': 'decoder'}, {'id': 7074, 'synset': 'decolletage.n.01', 'name': 'decolletage'}, {'id': 7075, 'synset': 'decoupage.n.01', 'name': 'decoupage'}, {'id': 7076, 'synset': 'dedicated_file_server.n.01', 'name': 'dedicated_file_server'}, {'id': 7077, 'synset': 'deep-freeze.n.01', 'name': 'deep-freeze'}, {'id': 7078, 'synset': 'deerstalker.n.01', 'name': 'deerstalker'}, {'id': 7079, 'synset': 'defense_system.n.01', 'name': 'defense_system'}, {'id': 7080, 'synset': 'defensive_structure.n.01', 'name': 'defensive_structure'}, {'id': 7081, 'synset': 'defibrillator.n.01', 'name': 'defibrillator'}, {'id': 7082, 'synset': 'defilade.n.01', 'name': 'defilade'}, {'id': 7083, 'synset': 'deflector.n.01', 'name': 'deflector'}, {'id': 7084, 'synset': 'delayed_action.n.01', 'name': 'delayed_action'}, {'id': 7085, 'synset': 'delay_line.n.01', 'name': 'delay_line'}, {'id': 7086, 'synset': 'delft.n.01', 'name': 'delft'}, {'id': 7087, 'synset': 'delicatessen.n.02', 'name': 'delicatessen'}, {'id': 7088, 'synset': 'delivery_truck.n.01', 'name': 'delivery_truck'}, {'id': 7089, 'synset': 'delta_wing.n.01', 'name': 'delta_wing'}, {'id': 7090, 'synset': 'demijohn.n.01', 'name': 'demijohn'}, {'id': 7091, 'synset': 'demitasse.n.02', 'name': 'demitasse'}, {'id': 7092, 'synset': 'den.n.04', 'name': 'den'}, {'id': 7093, 'synset': 'denim.n.02', 'name': 'denim'}, {'id': 7094, 'synset': 'densimeter.n.01', 'name': 'densimeter'}, {'id': 7095, 'synset': 'densitometer.n.01', 'name': 'densitometer'}, {'id': 7096, 'synset': 'dental_appliance.n.01', 'name': 'dental_appliance'}, {'id': 7097, 'synset': 'dental_implant.n.01', 'name': 'dental_implant'}, {'id': 7098, 'synset': "dentist's_drill.n.01", 'name': "dentist's_drill"}, {'id': 7099, 'synset': 'denture.n.01', 'name': 'denture'}, {'id': 7100, 'synset': 'deodorant.n.01', 'name': 'deodorant'}, {'id': 7101, 'synset': 'department_store.n.01', 'name': 'department_store'}, {'id': 7102, 'synset': 'departure_lounge.n.01', 'name': 'departure_lounge'}, {'id': 7103, 'synset': 'depilatory.n.02', 'name': 'depilatory'}, {'id': 7104, 'synset': 'depressor.n.03', 'name': 'depressor'}, {'id': 7105, 'synset': 'depth_finder.n.01', 'name': 'depth_finder'}, {'id': 7106, 'synset': 'depth_gauge.n.01', 'name': 'depth_gauge'}, {'id': 7107, 'synset': 'derrick.n.02', 'name': 'derrick'}, {'id': 7108, 'synset': 'derrick.n.01', 'name': 'derrick'}, {'id': 7109, 'synset': 'derringer.n.01', 'name': 'derringer'}, {'id': 7110, 'synset': 'desk_phone.n.01', 'name': 'desk_phone'}, {'id': 
7111, 'synset': 'desktop_computer.n.01', 'name': 'desktop_computer'}, {'id': 7112, 'synset': 'dessert_spoon.n.01', 'name': 'dessert_spoon'}, {'id': 7113, 'synset': 'destroyer.n.01', 'name': 'destroyer'}, {'id': 7114, 'synset': 'destroyer_escort.n.01', 'name': 'destroyer_escort'}, {'id': 7115, 'synset': 'detached_house.n.01', 'name': 'detached_house'}, {'id': 7116, 'synset': 'detector.n.01', 'name': 'detector'}, {'id': 7117, 'synset': 'detector.n.03', 'name': 'detector'}, {'id': 7118, 'synset': 'detention_home.n.01', 'name': 'detention_home'}, {'id': 7119, 'synset': 'detonating_fuse.n.01', 'name': 'detonating_fuse'}, {'id': 7120, 'synset': 'detonator.n.01', 'name': 'detonator'}, {'id': 7121, 'synset': 'developer.n.02', 'name': 'developer'}, {'id': 7122, 'synset': 'device.n.01', 'name': 'device'}, {'id': 7123, 'synset': 'dewar_flask.n.01', 'name': 'Dewar_flask'}, {'id': 7124, 'synset': 'dhoti.n.01', 'name': 'dhoti'}, {'id': 7125, 'synset': 'dhow.n.01', 'name': 'dhow'}, {'id': 7126, 'synset': 'dial.n.04', 'name': 'dial'}, {'id': 7127, 'synset': 'dial.n.03', 'name': 'dial'}, {'id': 7128, 'synset': 'dial.n.02', 'name': 'dial'}, {'id': 7129, 'synset': 'dialog_box.n.01', 'name': 'dialog_box'}, {'id': 7130, 'synset': 'dial_telephone.n.01', 'name': 'dial_telephone'}, {'id': 7131, 'synset': 'dialyzer.n.01', 'name': 'dialyzer'}, {'id': 7132, 'synset': 'diamante.n.02', 'name': 'diamante'}, {'id': 7133, 'synset': 'diaper.n.02', 'name': 'diaper'}, {'id': 7134, 'synset': 'diaphone.n.01', 'name': 'diaphone'}, {'id': 7135, 'synset': 'diaphragm.n.01', 'name': 'diaphragm'}, {'id': 7136, 'synset': 'diaphragm.n.04', 'name': 'diaphragm'}, {'id': 7137, 'synset': 'diathermy_machine.n.01', 'name': 'diathermy_machine'}, {'id': 7138, 'synset': 'dibble.n.01', 'name': 'dibble'}, {'id': 7139, 'synset': 'dice_cup.n.01', 'name': 'dice_cup'}, {'id': 7140, 'synset': 'dicer.n.01', 'name': 'dicer'}, {'id': 7141, 'synset': 'dickey.n.02', 'name': 'dickey'}, {'id': 7142, 'synset': 'dickey.n.01', 'name': 'dickey'}, {'id': 7143, 'synset': 'dictaphone.n.01', 'name': 'Dictaphone'}, {'id': 7144, 'synset': 'die.n.03', 'name': 'die'}, {'id': 7145, 'synset': 'diesel.n.02', 'name': 'diesel'}, {'id': 7146, 'synset': 'diesel-electric_locomotive.n.01', 'name': 'diesel-electric_locomotive'}, {'id': 7147, 'synset': 'diesel-hydraulic_locomotive.n.01', 'name': 'diesel-hydraulic_locomotive'}, {'id': 7148, 'synset': 'diesel_locomotive.n.01', 'name': 'diesel_locomotive'}, {'id': 7149, 'synset': 'diestock.n.01', 'name': 'diestock'}, {'id': 7150, 'synset': 'differential_analyzer.n.01', 'name': 'differential_analyzer'}, {'id': 7151, 'synset': 'differential_gear.n.01', 'name': 'differential_gear'}, {'id': 7152, 'synset': 'diffuser.n.02', 'name': 'diffuser'}, {'id': 7153, 'synset': 'diffuser.n.01', 'name': 'diffuser'}, {'id': 7154, 'synset': 'digester.n.01', 'name': 'digester'}, {'id': 7155, 'synset': 'diggings.n.02', 'name': 'diggings'}, {'id': 7156, 'synset': 'digital-analog_converter.n.01', 'name': 'digital-analog_converter'}, {'id': 7157, 'synset': 'digital_audiotape.n.01', 'name': 'digital_audiotape'}, {'id': 7158, 'synset': 'digital_camera.n.01', 'name': 'digital_camera'}, {'id': 7159, 'synset': 'digital_clock.n.01', 'name': 'digital_clock'}, {'id': 7160, 'synset': 'digital_computer.n.01', 'name': 'digital_computer'}, {'id': 7161, 'synset': 'digital_display.n.01', 'name': 'digital_display'}, {'id': 7162, 'synset': 'digital_subscriber_line.n.01', 'name': 'digital_subscriber_line'}, {'id': 7163, 'synset': 'digital_voltmeter.n.01', 'name': 
'digital_voltmeter'}, {'id': 7164, 'synset': 'digital_watch.n.01', 'name': 'digital_watch'}, {'id': 7165, 'synset': 'digitizer.n.01', 'name': 'digitizer'}, {'id': 7166, 'synset': 'dilator.n.03', 'name': 'dilator'}, {'id': 7167, 'synset': 'dildo.n.01', 'name': 'dildo'}, {'id': 7168, 'synset': 'dimity.n.01', 'name': 'dimity'}, {'id': 7169, 'synset': 'dimmer.n.01', 'name': 'dimmer'}, {'id': 7170, 'synset': 'diner.n.03', 'name': 'diner'}, {'id': 7171, 'synset': 'dinette.n.01', 'name': 'dinette'}, {'id': 7172, 'synset': 'dining_area.n.01', 'name': 'dining_area'}, {'id': 7173, 'synset': 'dining_car.n.01', 'name': 'dining_car'}, {'id': 7174, 'synset': 'dining-hall.n.01', 'name': 'dining-hall'}, {'id': 7175, 'synset': 'dining_room.n.01', 'name': 'dining_room'}, {'id': 7176, 'synset': 'dining-room_furniture.n.01', 'name': 'dining-room_furniture'}, {'id': 7177, 'synset': 'dining-room_table.n.01', 'name': 'dining-room_table'}, {'id': 7178, 'synset': 'dinner_bell.n.01', 'name': 'dinner_bell'}, {'id': 7179, 'synset': 'dinner_dress.n.01', 'name': 'dinner_dress'}, {'id': 7180, 'synset': 'dinner_napkin.n.01', 'name': 'dinner_napkin'}, {'id': 7181, 'synset': 'dinner_pail.n.01', 'name': 'dinner_pail'}, {'id': 7182, 'synset': 'dinner_table.n.01', 'name': 'dinner_table'}, {'id': 7183, 'synset': 'dinner_theater.n.01', 'name': 'dinner_theater'}, {'id': 7184, 'synset': 'diode.n.02', 'name': 'diode'}, {'id': 7185, 'synset': 'diode.n.01', 'name': 'diode'}, {'id': 7186, 'synset': 'dip.n.07', 'name': 'dip'}, {'id': 7187, 'synset': 'diplomatic_building.n.01', 'name': 'diplomatic_building'}, {'id': 7188, 'synset': 'dipole.n.02', 'name': 'dipole'}, {'id': 7189, 'synset': 'dipper.n.01', 'name': 'dipper'}, {'id': 7190, 'synset': 'dipstick.n.01', 'name': 'dipstick'}, {'id': 7191, 'synset': 'dip_switch.n.01', 'name': 'DIP_switch'}, {'id': 7192, 'synset': 'directional_antenna.n.01', 'name': 'directional_antenna'}, {'id': 7193, 'synset': 'directional_microphone.n.01', 'name': 'directional_microphone'}, {'id': 7194, 'synset': 'direction_finder.n.01', 'name': 'direction_finder'}, {'id': 7195, 'synset': 'dirk.n.01', 'name': 'dirk'}, {'id': 7196, 'synset': 'dirndl.n.02', 'name': 'dirndl'}, {'id': 7197, 'synset': 'dirndl.n.01', 'name': 'dirndl'}, {'id': 7198, 'synset': 'dirty_bomb.n.01', 'name': 'dirty_bomb'}, {'id': 7199, 'synset': 'discharge_lamp.n.01', 'name': 'discharge_lamp'}, {'id': 7200, 'synset': 'discharge_pipe.n.01', 'name': 'discharge_pipe'}, {'id': 7201, 'synset': 'disco.n.02', 'name': 'disco'}, {'id': 7202, 'synset': 'discount_house.n.01', 'name': 'discount_house'}, {'id': 7203, 'synset': 'discus.n.02', 'name': 'discus'}, {'id': 7204, 'synset': 'disguise.n.02', 'name': 'disguise'}, {'id': 7205, 'synset': 'dishpan.n.01', 'name': 'dishpan'}, {'id': 7206, 'synset': 'dish_rack.n.01', 'name': 'dish_rack'}, {'id': 7207, 'synset': 'disk.n.02', 'name': 'disk'}, {'id': 7208, 'synset': 'disk_brake.n.01', 'name': 'disk_brake'}, {'id': 7209, 'synset': 'disk_clutch.n.01', 'name': 'disk_clutch'}, {'id': 7210, 'synset': 'disk_controller.n.01', 'name': 'disk_controller'}, {'id': 7211, 'synset': 'disk_drive.n.01', 'name': 'disk_drive'}, {'id': 7212, 'synset': 'diskette.n.01', 'name': 'diskette'}, {'id': 7213, 'synset': 'disk_harrow.n.01', 'name': 'disk_harrow'}, {'id': 7214, 'synset': 'dispatch_case.n.01', 'name': 'dispatch_case'}, {'id': 7215, 'synset': 'dispensary.n.01', 'name': 'dispensary'}, {'id': 7216, 'synset': 'display.n.06', 'name': 'display'}, {'id': 7217, 'synset': 'display_adapter.n.01', 'name': 'display_adapter'}, {'id': 
7218, 'synset': 'display_panel.n.01', 'name': 'display_panel'}, {'id': 7219, 'synset': 'display_window.n.01', 'name': 'display_window'}, {'id': 7220, 'synset': 'disposal.n.04', 'name': 'disposal'}, {'id': 7221, 'synset': 'disrupting_explosive.n.01', 'name': 'disrupting_explosive'}, {'id': 7222, 'synset': 'distaff.n.02', 'name': 'distaff'}, {'id': 7223, 'synset': 'distillery.n.01', 'name': 'distillery'}, {'id': 7224, 'synset': 'distributor.n.04', 'name': 'distributor'}, {'id': 7225, 'synset': 'distributor_cam.n.01', 'name': 'distributor_cam'}, {'id': 7226, 'synset': 'distributor_cap.n.01', 'name': 'distributor_cap'}, {'id': 7227, 'synset': 'distributor_housing.n.01', 'name': 'distributor_housing'}, {'id': 7228, 'synset': 'distributor_point.n.01', 'name': 'distributor_point'}, {'id': 7229, 'synset': 'ditch.n.01', 'name': 'ditch'}, {'id': 7230, 'synset': 'ditch_spade.n.01', 'name': 'ditch_spade'}, {'id': 7231, 'synset': 'ditty_bag.n.01', 'name': 'ditty_bag'}, {'id': 7232, 'synset': 'divan.n.01', 'name': 'divan'}, {'id': 7233, 'synset': 'divan.n.04', 'name': 'divan'}, {'id': 7234, 'synset': 'dive_bomber.n.01', 'name': 'dive_bomber'}, {'id': 7235, 'synset': 'diverging_lens.n.01', 'name': 'diverging_lens'}, {'id': 7236, 'synset': 'divided_highway.n.01', 'name': 'divided_highway'}, {'id': 7237, 'synset': 'divider.n.04', 'name': 'divider'}, {'id': 7238, 'synset': 'diving_bell.n.01', 'name': 'diving_bell'}, {'id': 7239, 'synset': 'divining_rod.n.01', 'name': 'divining_rod'}, {'id': 7240, 'synset': 'diving_suit.n.01', 'name': 'diving_suit'}, {'id': 7241, 'synset': 'dixie.n.02', 'name': 'dixie'}, {'id': 7242, 'synset': 'dock.n.05', 'name': 'dock'}, {'id': 7243, 'synset': 'doeskin.n.02', 'name': 'doeskin'}, {'id': 7244, 'synset': 'dogcart.n.01', 'name': 'dogcart'}, {'id': 7245, 'synset': 'doggie_bag.n.01', 'name': 'doggie_bag'}, {'id': 7246, 'synset': 'dogsled.n.01', 'name': 'dogsled'}, {'id': 7247, 'synset': 'dog_wrench.n.01', 'name': 'dog_wrench'}, {'id': 7248, 'synset': 'doily.n.01', 'name': 'doily'}, {'id': 7249, 'synset': 'dolly.n.02', 'name': 'dolly'}, {'id': 7250, 'synset': 'dolman.n.02', 'name': 'dolman'}, {'id': 7251, 'synset': 'dolman.n.01', 'name': 'dolman'}, {'id': 7252, 'synset': 'dolman_sleeve.n.01', 'name': 'dolman_sleeve'}, {'id': 7253, 'synset': 'dolmen.n.01', 'name': 'dolmen'}, {'id': 7254, 'synset': 'dome.n.04', 'name': 'dome'}, {'id': 7255, 'synset': 'dome.n.03', 'name': 'dome'}, {'id': 7256, 'synset': 'domino.n.03', 'name': 'domino'}, {'id': 7257, 'synset': 'dongle.n.01', 'name': 'dongle'}, {'id': 7258, 'synset': 'donkey_jacket.n.01', 'name': 'donkey_jacket'}, {'id': 7259, 'synset': 'door.n.01', 'name': 'door'}, {'id': 7260, 'synset': 'door.n.05', 'name': 'door'}, {'id': 7261, 'synset': 'door.n.04', 'name': 'door'}, {'id': 7262, 'synset': 'doorbell.n.01', 'name': 'doorbell'}, {'id': 7263, 'synset': 'doorframe.n.01', 'name': 'doorframe'}, {'id': 7264, 'synset': 'doorjamb.n.01', 'name': 'doorjamb'}, {'id': 7265, 'synset': 'doorlock.n.01', 'name': 'doorlock'}, {'id': 7266, 'synset': 'doornail.n.01', 'name': 'doornail'}, {'id': 7267, 'synset': 'doorplate.n.01', 'name': 'doorplate'}, {'id': 7268, 'synset': 'doorsill.n.01', 'name': 'doorsill'}, {'id': 7269, 'synset': 'doorstop.n.01', 'name': 'doorstop'}, {'id': 7270, 'synset': 'doppler_radar.n.01', 'name': 'Doppler_radar'}, {'id': 7271, 'synset': 'dormer.n.01', 'name': 'dormer'}, {'id': 7272, 'synset': 'dormer_window.n.01', 'name': 'dormer_window'}, {'id': 7273, 'synset': 'dormitory.n.01', 'name': 'dormitory'}, {'id': 7274, 'synset': 
'dormitory.n.02', 'name': 'dormitory'}, {'id': 7275, 'synset': 'dosemeter.n.01', 'name': 'dosemeter'}, {'id': 7276, 'synset': 'dossal.n.01', 'name': 'dossal'}, {'id': 7277, 'synset': 'dot_matrix_printer.n.01', 'name': 'dot_matrix_printer'}, {'id': 7278, 'synset': 'double_bed.n.01', 'name': 'double_bed'}, {'id': 7279, 'synset': 'double-bitted_ax.n.01', 'name': 'double-bitted_ax'}, {'id': 7280, 'synset': 'double_boiler.n.01', 'name': 'double_boiler'}, {'id': 7281, 'synset': 'double-breasted_jacket.n.01', 'name': 'double-breasted_jacket'}, {'id': 7282, 'synset': 'double-breasted_suit.n.01', 'name': 'double-breasted_suit'}, {'id': 7283, 'synset': 'double_door.n.01', 'name': 'double_door'}, {'id': 7284, 'synset': 'double_glazing.n.01', 'name': 'double_glazing'}, {'id': 7285, 'synset': 'double-hung_window.n.01', 'name': 'double-hung_window'}, {'id': 7286, 'synset': 'double_knit.n.01', 'name': 'double_knit'}, {'id': 7287, 'synset': 'doubler.n.01', 'name': 'doubler'}, {'id': 7288, 'synset': 'double_reed.n.02', 'name': 'double_reed'}, {'id': 7289, 'synset': 'double-reed_instrument.n.01', 'name': 'double-reed_instrument'}, {'id': 7290, 'synset': 'doublet.n.01', 'name': 'doublet'}, {'id': 7291, 'synset': 'doubletree.n.01', 'name': 'doubletree'}, {'id': 7292, 'synset': 'douche.n.01', 'name': 'douche'}, {'id': 7293, 'synset': 'dovecote.n.01', 'name': 'dovecote'}, {'id': 7294, 'synset': "dover's_powder.n.01", 'name': "Dover's_powder"}, {'id': 7295, 'synset': 'dovetail.n.01', 'name': 'dovetail'}, {'id': 7296, 'synset': 'dovetail_plane.n.01', 'name': 'dovetail_plane'}, {'id': 7297, 'synset': 'dowel.n.01', 'name': 'dowel'}, {'id': 7298, 'synset': 'downstage.n.01', 'name': 'downstage'}, {'id': 7299, 'synset': 'drafting_instrument.n.01', 'name': 'drafting_instrument'}, {'id': 7300, 'synset': 'drafting_table.n.01', 'name': 'drafting_table'}, {'id': 7301, 'synset': 'dragunov.n.01', 'name': 'Dragunov'}, {'id': 7302, 'synset': 'drainage_ditch.n.01', 'name': 'drainage_ditch'}, {'id': 7303, 'synset': 'drainage_system.n.01', 'name': 'drainage_system'}, {'id': 7304, 'synset': 'drain_basket.n.01', 'name': 'drain_basket'}, {'id': 7305, 'synset': 'drainplug.n.01', 'name': 'drainplug'}, {'id': 7306, 'synset': 'drape.n.03', 'name': 'drape'}, {'id': 7307, 'synset': 'drapery.n.02', 'name': 'drapery'}, {'id': 7308, 'synset': 'drawbar.n.01', 'name': 'drawbar'}, {'id': 7309, 'synset': 'drawbridge.n.01', 'name': 'drawbridge'}, {'id': 7310, 'synset': 'drawing_chalk.n.01', 'name': 'drawing_chalk'}, {'id': 7311, 'synset': 'drawing_room.n.01', 'name': 'drawing_room'}, {'id': 7312, 'synset': 'drawing_room.n.02', 'name': 'drawing_room'}, {'id': 7313, 'synset': 'drawknife.n.01', 'name': 'drawknife'}, {'id': 7314, 'synset': 'drawstring_bag.n.01', 'name': 'drawstring_bag'}, {'id': 7315, 'synset': 'dray.n.01', 'name': 'dray'}, {'id': 7316, 'synset': 'dreadnought.n.01', 'name': 'dreadnought'}, {'id': 7317, 'synset': 'dredge.n.01', 'name': 'dredge'}, {'id': 7318, 'synset': 'dredger.n.01', 'name': 'dredger'}, {'id': 7319, 'synset': 'dredging_bucket.n.01', 'name': 'dredging_bucket'}, {'id': 7320, 'synset': 'dress_blues.n.01', 'name': 'dress_blues'}, {'id': 7321, 'synset': 'dressing.n.04', 'name': 'dressing'}, {'id': 7322, 'synset': 'dressing_case.n.01', 'name': 'dressing_case'}, {'id': 7323, 'synset': 'dressing_gown.n.01', 'name': 'dressing_gown'}, {'id': 7324, 'synset': 'dressing_room.n.01', 'name': 'dressing_room'}, {'id': 7325, 'synset': 'dressing_sack.n.01', 'name': 'dressing_sack'}, {'id': 7326, 'synset': 'dressing_table.n.01', 'name': 
'dressing_table'}, {'id': 7327, 'synset': 'dress_rack.n.01', 'name': 'dress_rack'}, {'id': 7328, 'synset': 'dress_shirt.n.01', 'name': 'dress_shirt'}, {'id': 7329, 'synset': 'dress_uniform.n.01', 'name': 'dress_uniform'}, {'id': 7330, 'synset': 'drift_net.n.01', 'name': 'drift_net'}, {'id': 7331, 'synset': 'electric_drill.n.01', 'name': 'electric_drill'}, {'id': 7332, 'synset': 'drilling_platform.n.01', 'name': 'drilling_platform'}, {'id': 7333, 'synset': 'drill_press.n.01', 'name': 'drill_press'}, {'id': 7334, 'synset': 'drill_rig.n.01', 'name': 'drill_rig'}, {'id': 7335, 'synset': 'drinking_fountain.n.01', 'name': 'drinking_fountain'}, {'id': 7336, 'synset': 'drinking_vessel.n.01', 'name': 'drinking_vessel'}, {'id': 7337, 'synset': 'drip_loop.n.01', 'name': 'drip_loop'}, {'id': 7338, 'synset': 'drip_mat.n.01', 'name': 'drip_mat'}, {'id': 7339, 'synset': 'drip_pan.n.02', 'name': 'drip_pan'}, {'id': 7340, 'synset': 'dripping_pan.n.01', 'name': 'dripping_pan'}, {'id': 7341, 'synset': 'drip_pot.n.01', 'name': 'drip_pot'}, {'id': 7342, 'synset': 'drive.n.02', 'name': 'drive'}, {'id': 7343, 'synset': 'drive.n.10', 'name': 'drive'}, {'id': 7344, 'synset': 'drive_line.n.01', 'name': 'drive_line'}, {'id': 7345, 'synset': 'driver.n.05', 'name': 'driver'}, {'id': 7346, 'synset': 'driveshaft.n.01', 'name': 'driveshaft'}, {'id': 7347, 'synset': 'driveway.n.01', 'name': 'driveway'}, {'id': 7348, 'synset': 'driving_iron.n.01', 'name': 'driving_iron'}, {'id': 7349, 'synset': 'driving_wheel.n.01', 'name': 'driving_wheel'}, {'id': 7350, 'synset': 'drogue.n.04', 'name': 'drogue'}, {'id': 7351, 'synset': 'drogue_parachute.n.01', 'name': 'drogue_parachute'}, {'id': 7352, 'synset': 'drone.n.05', 'name': 'drone'}, {'id': 7353, 'synset': 'drop_arch.n.01', 'name': 'drop_arch'}, {'id': 7354, 'synset': 'drop_cloth.n.02', 'name': 'drop_cloth'}, {'id': 7355, 'synset': 'drop_curtain.n.01', 'name': 'drop_curtain'}, {'id': 7356, 'synset': 'drop_forge.n.01', 'name': 'drop_forge'}, {'id': 7357, 'synset': 'drop-leaf_table.n.01', 'name': 'drop-leaf_table'}, {'id': 7358, 'synset': 'droshky.n.01', 'name': 'droshky'}, {'id': 7359, 'synset': 'drove.n.03', 'name': 'drove'}, {'id': 7360, 'synset': 'drugget.n.01', 'name': 'drugget'}, {'id': 7361, 'synset': 'drugstore.n.01', 'name': 'drugstore'}, {'id': 7362, 'synset': 'drum.n.04', 'name': 'drum'}, {'id': 7363, 'synset': 'drum_brake.n.01', 'name': 'drum_brake'}, {'id': 7364, 'synset': 'drumhead.n.01', 'name': 'drumhead'}, {'id': 7365, 'synset': 'drum_printer.n.01', 'name': 'drum_printer'}, {'id': 7366, 'synset': 'drum_sander.n.01', 'name': 'drum_sander'}, {'id': 7367, 'synset': 'dry_battery.n.01', 'name': 'dry_battery'}, {'id': 7368, 'synset': 'dry-bulb_thermometer.n.01', 'name': 'dry-bulb_thermometer'}, {'id': 7369, 'synset': 'dry_cell.n.01', 'name': 'dry_cell'}, {'id': 7370, 'synset': 'dry_dock.n.01', 'name': 'dry_dock'}, {'id': 7371, 'synset': 'dryer.n.01', 'name': 'dryer'}, {'id': 7372, 'synset': 'dry_fly.n.01', 'name': 'dry_fly'}, {'id': 7373, 'synset': 'dry_kiln.n.01', 'name': 'dry_kiln'}, {'id': 7374, 'synset': 'dry_masonry.n.01', 'name': 'dry_masonry'}, {'id': 7375, 'synset': 'dry_point.n.02', 'name': 'dry_point'}, {'id': 7376, 'synset': 'dry_wall.n.02', 'name': 'dry_wall'}, {'id': 7377, 'synset': 'dual_scan_display.n.01', 'name': 'dual_scan_display'}, {'id': 7378, 'synset': 'duck.n.04', 'name': 'duck'}, {'id': 7379, 'synset': 'duckboard.n.01', 'name': 'duckboard'}, {'id': 7380, 'synset': 'duckpin.n.01', 'name': 'duckpin'}, {'id': 7381, 'synset': 'dudeen.n.01', 'name': 
'dudeen'}, {'id': 7382, 'synset': 'duffel.n.02', 'name': 'duffel'}, {'id': 7383, 'synset': 'duffel_coat.n.01', 'name': 'duffel_coat'}, {'id': 7384, 'synset': 'dugout.n.01', 'name': 'dugout'}, {'id': 7385, 'synset': 'dugout_canoe.n.01', 'name': 'dugout_canoe'}, {'id': 7386, 'synset': 'dulciana.n.01', 'name': 'dulciana'}, {'id': 7387, 'synset': 'dulcimer.n.02', 'name': 'dulcimer'}, {'id': 7388, 'synset': 'dulcimer.n.01', 'name': 'dulcimer'}, {'id': 7389, 'synset': 'dumb_bomb.n.01', 'name': 'dumb_bomb'}, {'id': 7390, 'synset': 'dumbwaiter.n.01', 'name': 'dumbwaiter'}, {'id': 7391, 'synset': 'dumdum.n.01', 'name': 'dumdum'}, {'id': 7392, 'synset': 'dumpcart.n.01', 'name': 'dumpcart'}, {'id': 7393, 'synset': 'dump_truck.n.01', 'name': 'dump_truck'}, {'id': 7394, 'synset': 'dumpy_level.n.01', 'name': 'Dumpy_level'}, {'id': 7395, 'synset': 'dunce_cap.n.01', 'name': 'dunce_cap'}, {'id': 7396, 'synset': 'dune_buggy.n.01', 'name': 'dune_buggy'}, {'id': 7397, 'synset': 'dungeon.n.02', 'name': 'dungeon'}, {'id': 7398, 'synset': 'duplex_apartment.n.01', 'name': 'duplex_apartment'}, {'id': 7399, 'synset': 'duplex_house.n.01', 'name': 'duplex_house'}, {'id': 7400, 'synset': 'duplicator.n.01', 'name': 'duplicator'}, {'id': 7401, 'synset': 'dust_bag.n.01', 'name': 'dust_bag'}, {'id': 7402, 'synset': 'dustcloth.n.01', 'name': 'dustcloth'}, {'id': 7403, 'synset': 'dust_cover.n.03', 'name': 'dust_cover'}, {'id': 7404, 'synset': 'dust_cover.n.02', 'name': 'dust_cover'}, {'id': 7405, 'synset': 'dustmop.n.01', 'name': 'dustmop'}, {'id': 7406, 'synset': 'dutch_oven.n.01', 'name': 'Dutch_oven'}, {'id': 7407, 'synset': 'dutch_oven.n.02', 'name': 'Dutch_oven'}, {'id': 7408, 'synset': 'dwelling.n.01', 'name': 'dwelling'}, {'id': 7409, 'synset': 'dye-works.n.01', 'name': 'dye-works'}, {'id': 7410, 'synset': 'dynamo.n.01', 'name': 'dynamo'}, {'id': 7411, 'synset': 'dynamometer.n.01', 'name': 'dynamometer'}, {'id': 7412, 'synset': 'eames_chair.n.01', 'name': 'Eames_chair'}, {'id': 7413, 'synset': 'earflap.n.01', 'name': 'earflap'}, {'id': 7414, 'synset': 'early_warning_radar.n.01', 'name': 'early_warning_radar'}, {'id': 7415, 'synset': 'early_warning_system.n.01', 'name': 'early_warning_system'}, {'id': 7416, 'synset': 'earmuff.n.01', 'name': 'earmuff'}, {'id': 7417, 'synset': 'earplug.n.02', 'name': 'earplug'}, {'id': 7418, 'synset': 'earthenware.n.01', 'name': 'earthenware'}, {'id': 7419, 'synset': 'earthwork.n.01', 'name': 'earthwork'}, {'id': 7420, 'synset': 'easy_chair.n.01', 'name': 'easy_chair'}, {'id': 7421, 'synset': 'eaves.n.01', 'name': 'eaves'}, {'id': 7422, 'synset': 'ecclesiastical_attire.n.01', 'name': 'ecclesiastical_attire'}, {'id': 7423, 'synset': 'echinus.n.01', 'name': 'echinus'}, {'id': 7424, 'synset': 'echocardiograph.n.01', 'name': 'echocardiograph'}, {'id': 7425, 'synset': 'edger.n.02', 'name': 'edger'}, {'id': 7426, 'synset': 'edge_tool.n.01', 'name': 'edge_tool'}, {'id': 7427, 'synset': 'efficiency_apartment.n.01', 'name': 'efficiency_apartment'}, {'id': 7428, 'synset': 'egg-and-dart.n.01', 'name': 'egg-and-dart'}, {'id': 7429, 'synset': 'egg_timer.n.01', 'name': 'egg_timer'}, {'id': 7430, 'synset': 'eiderdown.n.01', 'name': 'eiderdown'}, {'id': 7431, 'synset': 'eight_ball.n.01', 'name': 'eight_ball'}, {'id': 7432, 'synset': 'ejection_seat.n.01', 'name': 'ejection_seat'}, {'id': 7433, 'synset': 'elastic.n.02', 'name': 'elastic'}, {'id': 7434, 'synset': 'elastic_bandage.n.01', 'name': 'elastic_bandage'}, {'id': 7435, 'synset': 'elastoplast.n.01', 'name': 'Elastoplast'}, {'id': 7436, 'synset': 
'elbow.n.04', 'name': 'elbow'}, {'id': 7437, 'synset': 'elbow_pad.n.01', 'name': 'elbow_pad'}, {'id': 7438, 'synset': 'electric.n.01', 'name': 'electric'}, {'id': 7439, 'synset': 'electrical_cable.n.01', 'name': 'electrical_cable'}, {'id': 7440, 'synset': 'electrical_contact.n.01', 'name': 'electrical_contact'}, {'id': 7441, 'synset': 'electrical_converter.n.01', 'name': 'electrical_converter'}, {'id': 7442, 'synset': 'electrical_device.n.01', 'name': 'electrical_device'}, {'id': 7443, 'synset': 'electrical_system.n.02', 'name': 'electrical_system'}, {'id': 7444, 'synset': 'electric_bell.n.01', 'name': 'electric_bell'}, {'id': 7445, 'synset': 'electric_blanket.n.01', 'name': 'electric_blanket'}, {'id': 7446, 'synset': 'electric_clock.n.01', 'name': 'electric_clock'}, {'id': 7447, 'synset': 'electric-discharge_lamp.n.01', 'name': 'electric-discharge_lamp'}, {'id': 7448, 'synset': 'electric_fan.n.01', 'name': 'electric_fan'}, {'id': 7449, 'synset': 'electric_frying_pan.n.01', 'name': 'electric_frying_pan'}, {'id': 7450, 'synset': 'electric_furnace.n.01', 'name': 'electric_furnace'}, {'id': 7451, 'synset': 'electric_guitar.n.01', 'name': 'electric_guitar'}, {'id': 7452, 'synset': 'electric_hammer.n.01', 'name': 'electric_hammer'}, {'id': 7453, 'synset': 'electric_heater.n.01', 'name': 'electric_heater'}, {'id': 7454, 'synset': 'electric_lamp.n.01', 'name': 'electric_lamp'}, {'id': 7455, 'synset': 'electric_locomotive.n.01', 'name': 'electric_locomotive'}, {'id': 7456, 'synset': 'electric_meter.n.01', 'name': 'electric_meter'}, {'id': 7457, 'synset': 'electric_mixer.n.01', 'name': 'electric_mixer'}, {'id': 7458, 'synset': 'electric_motor.n.01', 'name': 'electric_motor'}, {'id': 7459, 'synset': 'electric_organ.n.01', 'name': 'electric_organ'}, {'id': 7460, 'synset': 'electric_range.n.01', 'name': 'electric_range'}, {'id': 7461, 'synset': 'electric_toothbrush.n.01', 'name': 'electric_toothbrush'}, {'id': 7462, 'synset': 'electric_typewriter.n.01', 'name': 'electric_typewriter'}, {'id': 7463, 'synset': 'electro-acoustic_transducer.n.01', 'name': 'electro-acoustic_transducer'}, {'id': 7464, 'synset': 'electrode.n.01', 'name': 'electrode'}, {'id': 7465, 'synset': 'electrodynamometer.n.01', 'name': 'electrodynamometer'}, {'id': 7466, 'synset': 'electroencephalograph.n.01', 'name': 'electroencephalograph'}, {'id': 7467, 'synset': 'electrograph.n.01', 'name': 'electrograph'}, {'id': 7468, 'synset': 'electrolytic.n.01', 'name': 'electrolytic'}, {'id': 7469, 'synset': 'electrolytic_cell.n.01', 'name': 'electrolytic_cell'}, {'id': 7470, 'synset': 'electromagnet.n.01', 'name': 'electromagnet'}, {'id': 7471, 'synset': 'electrometer.n.01', 'name': 'electrometer'}, {'id': 7472, 'synset': 'electromyograph.n.01', 'name': 'electromyograph'}, {'id': 7473, 'synset': 'electron_accelerator.n.01', 'name': 'electron_accelerator'}, {'id': 7474, 'synset': 'electron_gun.n.01', 'name': 'electron_gun'}, {'id': 7475, 'synset': 'electronic_balance.n.01', 'name': 'electronic_balance'}, {'id': 7476, 'synset': 'electronic_converter.n.01', 'name': 'electronic_converter'}, {'id': 7477, 'synset': 'electronic_device.n.01', 'name': 'electronic_device'}, {'id': 7478, 'synset': 'electronic_equipment.n.01', 'name': 'electronic_equipment'}, {'id': 7479, 'synset': 'electronic_fetal_monitor.n.01', 'name': 'electronic_fetal_monitor'}, {'id': 7480, 'synset': 'electronic_instrument.n.01', 'name': 'electronic_instrument'}, {'id': 7481, 'synset': 'electronic_voltmeter.n.01', 'name': 'electronic_voltmeter'}, {'id': 7482, 'synset': 
'electron_microscope.n.01', 'name': 'electron_microscope'}, {'id': 7483, 'synset': 'electron_multiplier.n.01', 'name': 'electron_multiplier'}, {'id': 7484, 'synset': 'electrophorus.n.01', 'name': 'electrophorus'}, {'id': 7485, 'synset': 'electroscope.n.01', 'name': 'electroscope'}, {'id': 7486, 'synset': 'electrostatic_generator.n.01', 'name': 'electrostatic_generator'}, {'id': 7487, 'synset': 'electrostatic_printer.n.01', 'name': 'electrostatic_printer'}, {'id': 7488, 'synset': 'elevator.n.01', 'name': 'elevator'}, {'id': 7489, 'synset': 'elevator.n.02', 'name': 'elevator'}, {'id': 7490, 'synset': 'elevator_shaft.n.01', 'name': 'elevator_shaft'}, {'id': 7491, 'synset': 'embankment.n.01', 'name': 'embankment'}, {'id': 7492, 'synset': 'embassy.n.01', 'name': 'embassy'}, {'id': 7493, 'synset': 'embellishment.n.02', 'name': 'embellishment'}, {'id': 7494, 'synset': 'emergency_room.n.01', 'name': 'emergency_room'}, {'id': 7495, 'synset': 'emesis_basin.n.01', 'name': 'emesis_basin'}, {'id': 7496, 'synset': 'emitter.n.01', 'name': 'emitter'}, {'id': 7497, 'synset': 'empty.n.01', 'name': 'empty'}, {'id': 7498, 'synset': 'emulsion.n.02', 'name': 'emulsion'}, {'id': 7499, 'synset': 'enamel.n.04', 'name': 'enamel'}, {'id': 7500, 'synset': 'enamel.n.03', 'name': 'enamel'}, {'id': 7501, 'synset': 'enamelware.n.01', 'name': 'enamelware'}, {'id': 7502, 'synset': 'encaustic.n.01', 'name': 'encaustic'}, {'id': 7503, 'synset': 'encephalogram.n.02', 'name': 'encephalogram'}, {'id': 7504, 'synset': 'enclosure.n.01', 'name': 'enclosure'}, {'id': 7505, 'synset': 'endoscope.n.01', 'name': 'endoscope'}, {'id': 7506, 'synset': 'energizer.n.02', 'name': 'energizer'}, {'id': 7507, 'synset': 'engine.n.01', 'name': 'engine'}, {'id': 7508, 'synset': 'engine.n.04', 'name': 'engine'}, {'id': 7509, 'synset': 'engineering.n.03', 'name': 'engineering'}, {'id': 7510, 'synset': 'enginery.n.01', 'name': 'enginery'}, {'id': 7511, 'synset': 'english_horn.n.01', 'name': 'English_horn'}, {'id': 7512, 'synset': 'english_saddle.n.01', 'name': 'English_saddle'}, {'id': 7513, 'synset': 'enlarger.n.01', 'name': 'enlarger'}, {'id': 7514, 'synset': 'ensemble.n.05', 'name': 'ensemble'}, {'id': 7515, 'synset': 'ensign.n.03', 'name': 'ensign'}, {'id': 7516, 'synset': 'entablature.n.01', 'name': 'entablature'}, {'id': 7517, 'synset': 'entertainment_center.n.01', 'name': 'entertainment_center'}, {'id': 7518, 'synset': 'entrenching_tool.n.01', 'name': 'entrenching_tool'}, {'id': 7519, 'synset': 'entrenchment.n.01', 'name': 'entrenchment'}, {'id': 7520, 'synset': 'envelope.n.02', 'name': 'envelope'}, {'id': 7521, 'synset': 'envelope.n.06', 'name': 'envelope'}, {'id': 7522, 'synset': 'eolith.n.01', 'name': 'eolith'}, {'id': 7523, 'synset': 'epauliere.n.01', 'name': 'epauliere'}, {'id': 7524, 'synset': 'epee.n.01', 'name': 'epee'}, {'id': 7525, 'synset': 'epergne.n.01', 'name': 'epergne'}, {'id': 7526, 'synset': 'epicyclic_train.n.01', 'name': 'epicyclic_train'}, {'id': 7527, 'synset': 'epidiascope.n.01', 'name': 'epidiascope'}, {'id': 7528, 'synset': 'epilating_wax.n.01', 'name': 'epilating_wax'}, {'id': 7529, 'synset': 'equalizer.n.01', 'name': 'equalizer'}, {'id': 7530, 'synset': 'equatorial.n.01', 'name': 'equatorial'}, {'id': 7531, 'synset': 'equipment.n.01', 'name': 'equipment'}, {'id': 7532, 'synset': 'erasable_programmable_read-only_memory.n.01', 'name': 'erasable_programmable_read-only_memory'}, {'id': 7533, 'synset': 'erecting_prism.n.01', 'name': 'erecting_prism'}, {'id': 7534, 'synset': 'erection.n.02', 'name': 'erection'}, {'id': 
7535, 'synset': 'erlenmeyer_flask.n.01', 'name': 'Erlenmeyer_flask'}, {'id': 7536, 'synset': 'escape_hatch.n.01', 'name': 'escape_hatch'}, {'id': 7537, 'synset': 'escapement.n.01', 'name': 'escapement'}, {'id': 7538, 'synset': 'escape_wheel.n.01', 'name': 'escape_wheel'}, {'id': 7539, 'synset': 'escarpment.n.02', 'name': 'escarpment'}, {'id': 7540, 'synset': 'escutcheon.n.03', 'name': 'escutcheon'}, {'id': 7541, 'synset': 'esophagoscope.n.01', 'name': 'esophagoscope'}, {'id': 7542, 'synset': 'espadrille.n.01', 'name': 'espadrille'}, {'id': 7543, 'synset': 'espalier.n.01', 'name': 'espalier'}, {'id': 7544, 'synset': 'espresso_maker.n.01', 'name': 'espresso_maker'}, {'id': 7545, 'synset': 'espresso_shop.n.01', 'name': 'espresso_shop'}, {'id': 7546, 'synset': 'establishment.n.04', 'name': 'establishment'}, {'id': 7547, 'synset': 'estaminet.n.01', 'name': 'estaminet'}, {'id': 7548, 'synset': 'estradiol_patch.n.01', 'name': 'estradiol_patch'}, {'id': 7549, 'synset': 'etagere.n.01', 'name': 'etagere'}, {'id': 7550, 'synset': 'etamine.n.01', 'name': 'etamine'}, {'id': 7551, 'synset': 'etching.n.02', 'name': 'etching'}, {'id': 7552, 'synset': 'ethernet.n.01', 'name': 'ethernet'}, {'id': 7553, 'synset': 'ethernet_cable.n.01', 'name': 'ethernet_cable'}, {'id': 7554, 'synset': 'eton_jacket.n.01', 'name': 'Eton_jacket'}, {'id': 7555, 'synset': 'etui.n.01', 'name': 'etui'}, {'id': 7556, 'synset': 'eudiometer.n.01', 'name': 'eudiometer'}, {'id': 7557, 'synset': 'euphonium.n.01', 'name': 'euphonium'}, {'id': 7558, 'synset': 'evaporative_cooler.n.01', 'name': 'evaporative_cooler'}, {'id': 7559, 'synset': 'evening_bag.n.01', 'name': 'evening_bag'}, {'id': 7560, 'synset': 'exercise_bike.n.01', 'name': 'exercise_bike'}, {'id': 7561, 'synset': 'exercise_device.n.01', 'name': 'exercise_device'}, {'id': 7562, 'synset': 'exhaust.n.02', 'name': 'exhaust'}, {'id': 7563, 'synset': 'exhaust_fan.n.01', 'name': 'exhaust_fan'}, {'id': 7564, 'synset': 'exhaust_valve.n.01', 'name': 'exhaust_valve'}, {'id': 7565, 'synset': 'exhibition_hall.n.01', 'name': 'exhibition_hall'}, {'id': 7566, 'synset': 'exocet.n.01', 'name': 'Exocet'}, {'id': 7567, 'synset': 'expansion_bit.n.01', 'name': 'expansion_bit'}, {'id': 7568, 'synset': 'expansion_bolt.n.01', 'name': 'expansion_bolt'}, {'id': 7569, 'synset': 'explosive_detection_system.n.01', 'name': 'explosive_detection_system'}, {'id': 7570, 'synset': 'explosive_device.n.01', 'name': 'explosive_device'}, {'id': 7571, 'synset': 'explosive_trace_detection.n.01', 'name': 'explosive_trace_detection'}, {'id': 7572, 'synset': 'express.n.02', 'name': 'express'}, {'id': 7573, 'synset': 'extension.n.10', 'name': 'extension'}, {'id': 7574, 'synset': 'extension_cord.n.01', 'name': 'extension_cord'}, {'id': 7575, 'synset': 'external-combustion_engine.n.01', 'name': 'external-combustion_engine'}, {'id': 7576, 'synset': 'external_drive.n.01', 'name': 'external_drive'}, {'id': 7577, 'synset': 'extractor.n.01', 'name': 'extractor'}, {'id': 7578, 'synset': 'eyebrow_pencil.n.01', 'name': 'eyebrow_pencil'}, {'id': 7579, 'synset': 'eyecup.n.01', 'name': 'eyecup'}, {'id': 7580, 'synset': 'eyeliner.n.01', 'name': 'eyeliner'}, {'id': 7581, 'synset': 'eyepiece.n.01', 'name': 'eyepiece'}, {'id': 7582, 'synset': 'eyeshadow.n.01', 'name': 'eyeshadow'}, {'id': 7583, 'synset': 'fabric.n.01', 'name': 'fabric'}, {'id': 7584, 'synset': 'facade.n.01', 'name': 'facade'}, {'id': 7585, 'synset': 'face_guard.n.01', 'name': 'face_guard'}, {'id': 7586, 'synset': 'face_mask.n.01', 'name': 'face_mask'}, {'id': 7587, 
'synset': 'faceplate.n.01', 'name': 'faceplate'}, {'id': 7588, 'synset': 'face_powder.n.01', 'name': 'face_powder'}, {'id': 7589, 'synset': 'face_veil.n.01', 'name': 'face_veil'}, {'id': 7590, 'synset': 'facing.n.03', 'name': 'facing'}, {'id': 7591, 'synset': 'facing.n.01', 'name': 'facing'}, {'id': 7592, 'synset': 'facing.n.02', 'name': 'facing'}, {'id': 7593, 'synset': 'facsimile.n.02', 'name': 'facsimile'}, {'id': 7594, 'synset': 'factory.n.01', 'name': 'factory'}, {'id': 7595, 'synset': 'factory_ship.n.01', 'name': 'factory_ship'}, {'id': 7596, 'synset': 'fagot.n.02', 'name': 'fagot'}, {'id': 7597, 'synset': 'fagot_stitch.n.01', 'name': 'fagot_stitch'}, {'id': 7598, 'synset': 'fahrenheit_thermometer.n.01', 'name': 'Fahrenheit_thermometer'}, {'id': 7599, 'synset': 'faience.n.01', 'name': 'faience'}, {'id': 7600, 'synset': 'faille.n.01', 'name': 'faille'}, {'id': 7601, 'synset': 'fairlead.n.01', 'name': 'fairlead'}, {'id': 7602, 'synset': 'fairy_light.n.01', 'name': 'fairy_light'}, {'id': 7603, 'synset': 'falchion.n.01', 'name': 'falchion'}, {'id': 7604, 'synset': 'fallboard.n.01', 'name': 'fallboard'}, {'id': 7605, 'synset': 'fallout_shelter.n.01', 'name': 'fallout_shelter'}, {'id': 7606, 'synset': 'false_face.n.01', 'name': 'false_face'}, {'id': 7607, 'synset': 'false_teeth.n.01', 'name': 'false_teeth'}, {'id': 7608, 'synset': 'family_room.n.01', 'name': 'family_room'}, {'id': 7609, 'synset': 'fan_belt.n.01', 'name': 'fan_belt'}, {'id': 7610, 'synset': 'fan_blade.n.01', 'name': 'fan_blade'}, {'id': 7611, 'synset': 'fancy_dress.n.01', 'name': 'fancy_dress'}, {'id': 7612, 'synset': 'fanion.n.01', 'name': 'fanion'}, {'id': 7613, 'synset': 'fanlight.n.03', 'name': 'fanlight'}, {'id': 7614, 'synset': 'fanjet.n.02', 'name': 'fanjet'}, {'id': 7615, 'synset': 'fanjet.n.01', 'name': 'fanjet'}, {'id': 7616, 'synset': 'fanny_pack.n.01', 'name': 'fanny_pack'}, {'id': 7617, 'synset': 'fan_tracery.n.01', 'name': 'fan_tracery'}, {'id': 7618, 'synset': 'fan_vaulting.n.01', 'name': 'fan_vaulting'}, {'id': 7619, 'synset': 'farm_building.n.01', 'name': 'farm_building'}, {'id': 7620, 'synset': "farmer's_market.n.01", 'name': "farmer's_market"}, {'id': 7621, 'synset': 'farmhouse.n.01', 'name': 'farmhouse'}, {'id': 7622, 'synset': 'farm_machine.n.01', 'name': 'farm_machine'}, {'id': 7623, 'synset': 'farmplace.n.01', 'name': 'farmplace'}, {'id': 7624, 'synset': 'farmyard.n.01', 'name': 'farmyard'}, {'id': 7625, 'synset': 'farthingale.n.01', 'name': 'farthingale'}, {'id': 7626, 'synset': 'fastener.n.02', 'name': 'fastener'}, {'id': 7627, 'synset': 'fast_reactor.n.01', 'name': 'fast_reactor'}, {'id': 7628, 'synset': 'fat_farm.n.01', 'name': 'fat_farm'}, {'id': 7629, 'synset': 'fatigues.n.01', 'name': 'fatigues'}, {'id': 7630, 'synset': 'fauld.n.01', 'name': 'fauld'}, {'id': 7631, 'synset': 'fauteuil.n.01', 'name': 'fauteuil'}, {'id': 7632, 'synset': 'feather_boa.n.01', 'name': 'feather_boa'}, {'id': 7633, 'synset': 'featheredge.n.01', 'name': 'featheredge'}, {'id': 7634, 'synset': 'feedback_circuit.n.01', 'name': 'feedback_circuit'}, {'id': 7635, 'synset': 'feedlot.n.01', 'name': 'feedlot'}, {'id': 7636, 'synset': 'fell.n.02', 'name': 'fell'}, {'id': 7637, 'synset': 'felloe.n.01', 'name': 'felloe'}, {'id': 7638, 'synset': 'felt.n.01', 'name': 'felt'}, {'id': 7639, 'synset': 'felt-tip_pen.n.01', 'name': 'felt-tip_pen'}, {'id': 7640, 'synset': 'felucca.n.01', 'name': 'felucca'}, {'id': 7641, 'synset': 'fence.n.01', 'name': 'fence'}, {'id': 7642, 'synset': 'fencing_mask.n.01', 'name': 'fencing_mask'}, {'id': 
7643, 'synset': 'fencing_sword.n.01', 'name': 'fencing_sword'}, {'id': 7644, 'synset': 'fender.n.01', 'name': 'fender'}, {'id': 7645, 'synset': 'fender.n.02', 'name': 'fender'}, {'id': 7646, 'synset': 'ferrule.n.01', 'name': 'ferrule'}, {'id': 7647, 'synset': 'ferule.n.01', 'name': 'ferule'}, {'id': 7648, 'synset': 'festoon.n.01', 'name': 'festoon'}, {'id': 7649, 'synset': 'fetoscope.n.01', 'name': 'fetoscope'}, {'id': 7650, 'synset': 'fetter.n.01', 'name': 'fetter'}, {'id': 7651, 'synset': 'fez.n.02', 'name': 'fez'}, {'id': 7652, 'synset': 'fiber.n.05', 'name': 'fiber'}, {'id': 7653, 'synset': 'fiber_optic_cable.n.01', 'name': 'fiber_optic_cable'}, {'id': 7654, 'synset': 'fiberscope.n.01', 'name': 'fiberscope'}, {'id': 7655, 'synset': 'fichu.n.01', 'name': 'fichu'}, {'id': 7656, 'synset': 'fiddlestick.n.01', 'name': 'fiddlestick'}, {'id': 7657, 'synset': 'field_artillery.n.01', 'name': 'field_artillery'}, {'id': 7658, 'synset': 'field_coil.n.01', 'name': 'field_coil'}, {'id': 7659, 'synset': 'field-effect_transistor.n.01', 'name': 'field-effect_transistor'}, {'id': 7660, 'synset': 'field-emission_microscope.n.01', 'name': 'field-emission_microscope'}, {'id': 7661, 'synset': 'field_glass.n.01', 'name': 'field_glass'}, {'id': 7662, 'synset': 'field_hockey_ball.n.01', 'name': 'field_hockey_ball'}, {'id': 7663, 'synset': 'field_hospital.n.01', 'name': 'field_hospital'}, {'id': 7664, 'synset': 'field_house.n.01', 'name': 'field_house'}, {'id': 7665, 'synset': 'field_lens.n.01', 'name': 'field_lens'}, {'id': 7666, 'synset': 'field_magnet.n.01', 'name': 'field_magnet'}, {'id': 7667, 'synset': 'field-sequential_color_television.n.01', 'name': 'field-sequential_color_television'}, {'id': 7668, 'synset': 'field_tent.n.01', 'name': 'field_tent'}, {'id': 7669, 'synset': 'fieldwork.n.01', 'name': 'fieldwork'}, {'id': 7670, 'synset': 'fife.n.01', 'name': 'fife'}, {'id': 7671, 'synset': 'fifth_wheel.n.02', 'name': 'fifth_wheel'}, {'id': 7672, 'synset': 'fighting_chair.n.01', 'name': 'fighting_chair'}, {'id': 7673, 'synset': 'fig_leaf.n.02', 'name': 'fig_leaf'}, {'id': 7674, 'synset': 'figure_eight.n.01', 'name': 'figure_eight'}, {'id': 7675, 'synset': 'figure_loom.n.01', 'name': 'figure_loom'}, {'id': 7676, 'synset': 'figure_skate.n.01', 'name': 'figure_skate'}, {'id': 7677, 'synset': 'filament.n.04', 'name': 'filament'}, {'id': 7678, 'synset': 'filature.n.01', 'name': 'filature'}, {'id': 7679, 'synset': 'file_folder.n.01', 'name': 'file_folder'}, {'id': 7680, 'synset': 'file_server.n.01', 'name': 'file_server'}, {'id': 7681, 'synset': 'filigree.n.01', 'name': 'filigree'}, {'id': 7682, 'synset': 'filling.n.05', 'name': 'filling'}, {'id': 7683, 'synset': 'film.n.03', 'name': 'film'}, {'id': 7684, 'synset': 'film.n.05', 'name': 'film'}, {'id': 7685, 'synset': 'film_advance.n.01', 'name': 'film_advance'}, {'id': 7686, 'synset': 'filter.n.01', 'name': 'filter'}, {'id': 7687, 'synset': 'filter.n.02', 'name': 'filter'}, {'id': 7688, 'synset': 'finder.n.03', 'name': 'finder'}, {'id': 7689, 'synset': 'finery.n.01', 'name': 'finery'}, {'id': 7690, 'synset': 'fine-tooth_comb.n.01', 'name': 'fine-tooth_comb'}, {'id': 7691, 'synset': 'finger.n.03', 'name': 'finger'}, {'id': 7692, 'synset': 'fingerboard.n.03', 'name': 'fingerboard'}, {'id': 7693, 'synset': 'finger_bowl.n.01', 'name': 'finger_bowl'}, {'id': 7694, 'synset': 'finger_paint.n.01', 'name': 'finger_paint'}, {'id': 7695, 'synset': 'finger-painting.n.01', 'name': 'finger-painting'}, {'id': 7696, 'synset': 'finger_plate.n.01', 'name': 'finger_plate'}, {'id': 
7697, 'synset': 'fingerstall.n.01', 'name': 'fingerstall'}, {'id': 7698, 'synset': 'finish_coat.n.02', 'name': 'finish_coat'}, {'id': 7699, 'synset': 'finish_coat.n.01', 'name': 'finish_coat'}, {'id': 7700, 'synset': 'finisher.n.05', 'name': 'finisher'}, {'id': 7701, 'synset': 'fin_keel.n.01', 'name': 'fin_keel'}, {'id': 7702, 'synset': 'fipple.n.01', 'name': 'fipple'}, {'id': 7703, 'synset': 'fipple_flute.n.01', 'name': 'fipple_flute'}, {'id': 7704, 'synset': 'fire.n.04', 'name': 'fire'}, {'id': 7705, 'synset': 'firearm.n.01', 'name': 'firearm'}, {'id': 7706, 'synset': 'fire_bell.n.01', 'name': 'fire_bell'}, {'id': 7707, 'synset': 'fireboat.n.01', 'name': 'fireboat'}, {'id': 7708, 'synset': 'firebox.n.01', 'name': 'firebox'}, {'id': 7709, 'synset': 'firebrick.n.01', 'name': 'firebrick'}, {'id': 7710, 'synset': 'fire_control_radar.n.01', 'name': 'fire_control_radar'}, {'id': 7711, 'synset': 'fire_control_system.n.01', 'name': 'fire_control_system'}, {'id': 7712, 'synset': 'fire_iron.n.01', 'name': 'fire_iron'}, {'id': 7713, 'synset': "fireman's_ax.n.01", 'name': "fireman's_ax"}, {'id': 7714, 'synset': 'fire_screen.n.01', 'name': 'fire_screen'}, {'id': 7715, 'synset': 'fire_tongs.n.01', 'name': 'fire_tongs'}, {'id': 7716, 'synset': 'fire_tower.n.01', 'name': 'fire_tower'}, {'id': 7717, 'synset': 'firewall.n.02', 'name': 'firewall'}, {'id': 7718, 'synset': 'firing_chamber.n.01', 'name': 'firing_chamber'}, {'id': 7719, 'synset': 'firing_pin.n.01', 'name': 'firing_pin'}, {'id': 7720, 'synset': 'firkin.n.02', 'name': 'firkin'}, {'id': 7721, 'synset': 'firmer_chisel.n.01', 'name': 'firmer_chisel'}, {'id': 7722, 'synset': 'first-aid_station.n.01', 'name': 'first-aid_station'}, {'id': 7723, 'synset': 'first_base.n.01', 'name': 'first_base'}, {'id': 7724, 'synset': 'first_class.n.03', 'name': 'first_class'}, {'id': 7725, 'synset': "fisherman's_bend.n.01", 'name': "fisherman's_bend"}, {'id': 7726, 'synset': "fisherman's_knot.n.01", 'name': "fisherman's_knot"}, {'id': 7727, 'synset': "fisherman's_lure.n.01", 'name': "fisherman's_lure"}, {'id': 7728, 'synset': 'fishhook.n.01', 'name': 'fishhook'}, {'id': 7729, 'synset': 'fishing_boat.n.01', 'name': 'fishing_boat'}, {'id': 7730, 'synset': 'fishing_gear.n.01', 'name': 'fishing_gear'}, {'id': 7731, 'synset': 'fish_joint.n.01', 'name': 'fish_joint'}, {'id': 7732, 'synset': 'fish_knife.n.01', 'name': 'fish_knife'}, {'id': 7733, 'synset': 'fishnet.n.01', 'name': 'fishnet'}, {'id': 7734, 'synset': 'fish_slice.n.01', 'name': 'fish_slice'}, {'id': 7735, 'synset': 'fitment.n.01', 'name': 'fitment'}, {'id': 7736, 'synset': 'fixative.n.02', 'name': 'fixative'}, {'id': 7737, 'synset': 'fixer-upper.n.01', 'name': 'fixer-upper'}, {'id': 7738, 'synset': 'flageolet.n.02', 'name': 'flageolet'}, {'id': 7739, 'synset': 'flagon.n.01', 'name': 'flagon'}, {'id': 7740, 'synset': 'flagship.n.02', 'name': 'flagship'}, {'id': 7741, 'synset': 'flail.n.01', 'name': 'flail'}, {'id': 7742, 'synset': 'flambeau.n.01', 'name': 'flambeau'}, {'id': 7743, 'synset': 'flamethrower.n.01', 'name': 'flamethrower'}, {'id': 7744, 'synset': 'flange.n.01', 'name': 'flange'}, {'id': 7745, 'synset': 'flannel.n.03', 'name': 'flannel'}, {'id': 7746, 'synset': 'flannelette.n.01', 'name': 'flannelette'}, {'id': 7747, 'synset': 'flap.n.05', 'name': 'flap'}, {'id': 7748, 'synset': 'flash.n.09', 'name': 'flash'}, {'id': 7749, 'synset': 'flash_camera.n.01', 'name': 'flash_camera'}, {'id': 7750, 'synset': 'flasher.n.02', 'name': 'flasher'}, {'id': 7751, 'synset': 'flashlight_battery.n.01', 'name': 
'flashlight_battery'}, {'id': 7752, 'synset': 'flash_memory.n.01', 'name': 'flash_memory'}, {'id': 7753, 'synset': 'flask.n.01', 'name': 'flask'}, {'id': 7754, 'synset': 'flat_arch.n.01', 'name': 'flat_arch'}, {'id': 7755, 'synset': 'flatbed.n.02', 'name': 'flatbed'}, {'id': 7756, 'synset': 'flatbed_press.n.01', 'name': 'flatbed_press'}, {'id': 7757, 'synset': 'flat_bench.n.01', 'name': 'flat_bench'}, {'id': 7758, 'synset': 'flatcar.n.01', 'name': 'flatcar'}, {'id': 7759, 'synset': 'flat_file.n.01', 'name': 'flat_file'}, {'id': 7760, 'synset': 'flatlet.n.01', 'name': 'flatlet'}, {'id': 7761, 'synset': 'flat_panel_display.n.01', 'name': 'flat_panel_display'}, {'id': 7762, 'synset': 'flats.n.01', 'name': 'flats'}, {'id': 7763, 'synset': 'flat_tip_screwdriver.n.01', 'name': 'flat_tip_screwdriver'}, {'id': 7764, 'synset': 'fleet_ballistic_missile_submarine.n.01', 'name': 'fleet_ballistic_missile_submarine'}, {'id': 7765, 'synset': 'fleur-de-lis.n.02', 'name': 'fleur-de-lis'}, {'id': 7766, 'synset': 'flight_simulator.n.01', 'name': 'flight_simulator'}, {'id': 7767, 'synset': 'flintlock.n.02', 'name': 'flintlock'}, {'id': 7768, 'synset': 'flintlock.n.01', 'name': 'flintlock'}, {'id': 7769, 'synset': 'float.n.05', 'name': 'float'}, {'id': 7770, 'synset': 'floating_dock.n.01', 'name': 'floating_dock'}, {'id': 7771, 'synset': 'floatplane.n.01', 'name': 'floatplane'}, {'id': 7772, 'synset': 'flood.n.03', 'name': 'flood'}, {'id': 7773, 'synset': 'floor.n.01', 'name': 'floor'}, {'id': 7774, 'synset': 'floor.n.02', 'name': 'floor'}, {'id': 7775, 'synset': 'floor.n.09', 'name': 'floor'}, {'id': 7776, 'synset': 'floorboard.n.02', 'name': 'floorboard'}, {'id': 7777, 'synset': 'floor_cover.n.01', 'name': 'floor_cover'}, {'id': 7778, 'synset': 'floor_joist.n.01', 'name': 'floor_joist'}, {'id': 7779, 'synset': 'floor_lamp.n.01', 'name': 'floor_lamp'}, {'id': 7780, 'synset': 'flophouse.n.01', 'name': 'flophouse'}, {'id': 7781, 'synset': 'florist.n.02', 'name': 'florist'}, {'id': 7782, 'synset': 'floss.n.01', 'name': 'floss'}, {'id': 7783, 'synset': 'flotsam.n.01', 'name': 'flotsam'}, {'id': 7784, 'synset': 'flour_bin.n.01', 'name': 'flour_bin'}, {'id': 7785, 'synset': 'flour_mill.n.01', 'name': 'flour_mill'}, {'id': 7786, 'synset': 'flowerbed.n.01', 'name': 'flowerbed'}, {'id': 7787, 'synset': 'flugelhorn.n.01', 'name': 'flugelhorn'}, {'id': 7788, 'synset': 'fluid_drive.n.01', 'name': 'fluid_drive'}, {'id': 7789, 'synset': 'fluid_flywheel.n.01', 'name': 'fluid_flywheel'}, {'id': 7790, 'synset': 'flume.n.02', 'name': 'flume'}, {'id': 7791, 'synset': 'fluorescent_lamp.n.01', 'name': 'fluorescent_lamp'}, {'id': 7792, 'synset': 'fluoroscope.n.01', 'name': 'fluoroscope'}, {'id': 7793, 'synset': 'flush_toilet.n.01', 'name': 'flush_toilet'}, {'id': 7794, 'synset': 'flute.n.01', 'name': 'flute'}, {'id': 7795, 'synset': 'flux_applicator.n.01', 'name': 'flux_applicator'}, {'id': 7796, 'synset': 'fluxmeter.n.01', 'name': 'fluxmeter'}, {'id': 7797, 'synset': 'fly.n.05', 'name': 'fly'}, {'id': 7798, 'synset': 'flying_boat.n.01', 'name': 'flying_boat'}, {'id': 7799, 'synset': 'flying_buttress.n.01', 'name': 'flying_buttress'}, {'id': 7800, 'synset': 'flying_carpet.n.01', 'name': 'flying_carpet'}, {'id': 7801, 'synset': 'flying_jib.n.01', 'name': 'flying_jib'}, {'id': 7802, 'synset': 'fly_rod.n.01', 'name': 'fly_rod'}, {'id': 7803, 'synset': 'fly_tent.n.01', 'name': 'fly_tent'}, {'id': 7804, 'synset': 'flytrap.n.01', 'name': 'flytrap'}, {'id': 7805, 'synset': 'flywheel.n.01', 'name': 'flywheel'}, {'id': 7806, 'synset': 
'fob.n.03', 'name': 'fob'}, {'id': 7807, 'synset': 'foghorn.n.02', 'name': 'foghorn'}, {'id': 7808, 'synset': 'foglamp.n.01', 'name': 'foglamp'}, {'id': 7809, 'synset': 'foil.n.05', 'name': 'foil'}, {'id': 7810, 'synset': 'fold.n.06', 'name': 'fold'}, {'id': 7811, 'synset': 'folder.n.02', 'name': 'folder'}, {'id': 7812, 'synset': 'folding_door.n.01', 'name': 'folding_door'}, {'id': 7813, 'synset': 'folding_saw.n.01', 'name': 'folding_saw'}, {'id': 7814, 'synset': 'food_court.n.01', 'name': 'food_court'}, {'id': 7815, 'synset': 'food_hamper.n.01', 'name': 'food_hamper'}, {'id': 7816, 'synset': 'foot.n.11', 'name': 'foot'}, {'id': 7817, 'synset': 'footage.n.01', 'name': 'footage'}, {'id': 7818, 'synset': 'football_stadium.n.01', 'name': 'football_stadium'}, {'id': 7819, 'synset': 'footbath.n.01', 'name': 'footbath'}, {'id': 7820, 'synset': 'foot_brake.n.01', 'name': 'foot_brake'}, {'id': 7821, 'synset': 'footbridge.n.01', 'name': 'footbridge'}, {'id': 7822, 'synset': 'foothold.n.02', 'name': 'foothold'}, {'id': 7823, 'synset': 'footlocker.n.01', 'name': 'footlocker'}, {'id': 7824, 'synset': 'foot_rule.n.01', 'name': 'foot_rule'}, {'id': 7825, 'synset': 'footwear.n.02', 'name': 'footwear'}, {'id': 7826, 'synset': 'footwear.n.01', 'name': 'footwear'}, {'id': 7827, 'synset': 'forceps.n.01', 'name': 'forceps'}, {'id': 7828, 'synset': 'force_pump.n.01', 'name': 'force_pump'}, {'id': 7829, 'synset': 'fore-and-after.n.01', 'name': 'fore-and-after'}, {'id': 7830, 'synset': 'fore-and-aft_sail.n.01', 'name': 'fore-and-aft_sail'}, {'id': 7831, 'synset': 'forecastle.n.01', 'name': 'forecastle'}, {'id': 7832, 'synset': 'forecourt.n.01', 'name': 'forecourt'}, {'id': 7833, 'synset': 'foredeck.n.01', 'name': 'foredeck'}, {'id': 7834, 'synset': 'fore_edge.n.01', 'name': 'fore_edge'}, {'id': 7835, 'synset': 'foreground.n.02', 'name': 'foreground'}, {'id': 7836, 'synset': 'foremast.n.01', 'name': 'foremast'}, {'id': 7837, 'synset': 'fore_plane.n.01', 'name': 'fore_plane'}, {'id': 7838, 'synset': 'foresail.n.01', 'name': 'foresail'}, {'id': 7839, 'synset': 'forestay.n.01', 'name': 'forestay'}, {'id': 7840, 'synset': 'foretop.n.01', 'name': 'foretop'}, {'id': 7841, 'synset': 'fore-topmast.n.01', 'name': 'fore-topmast'}, {'id': 7842, 'synset': 'fore-topsail.n.01', 'name': 'fore-topsail'}, {'id': 7843, 'synset': 'forge.n.01', 'name': 'forge'}, {'id': 7844, 'synset': 'fork.n.04', 'name': 'fork'}, {'id': 7845, 'synset': 'formalwear.n.01', 'name': 'formalwear'}, {'id': 7846, 'synset': 'formica.n.01', 'name': 'Formica'}, {'id': 7847, 'synset': 'fortification.n.01', 'name': 'fortification'}, {'id': 7848, 'synset': 'fortress.n.01', 'name': 'fortress'}, {'id': 7849, 'synset': 'forty-five.n.01', 'name': 'forty-five'}, {'id': 7850, 'synset': 'foucault_pendulum.n.01', 'name': 'Foucault_pendulum'}, {'id': 7851, 'synset': 'foulard.n.01', 'name': 'foulard'}, {'id': 7852, 'synset': 'foul-weather_gear.n.01', 'name': 'foul-weather_gear'}, {'id': 7853, 'synset': 'foundation_garment.n.01', 'name': 'foundation_garment'}, {'id': 7854, 'synset': 'foundry.n.01', 'name': 'foundry'}, {'id': 7855, 'synset': 'fountain.n.01', 'name': 'fountain'}, {'id': 7856, 'synset': 'fountain_pen.n.01', 'name': 'fountain_pen'}, {'id': 7857, 'synset': 'four-in-hand.n.01', 'name': 'four-in-hand'}, {'id': 7858, 'synset': 'four-poster.n.01', 'name': 'four-poster'}, {'id': 7859, 'synset': 'four-pounder.n.01', 'name': 'four-pounder'}, {'id': 7860, 'synset': 'four-stroke_engine.n.01', 'name': 'four-stroke_engine'}, {'id': 7861, 'synset': 
'four-wheel_drive.n.02', 'name': 'four-wheel_drive'}, {'id': 7862, 'synset': 'four-wheel_drive.n.01', 'name': 'four-wheel_drive'}, {'id': 7863, 'synset': 'four-wheeler.n.01', 'name': 'four-wheeler'}, {'id': 7864, 'synset': 'fowling_piece.n.01', 'name': 'fowling_piece'}, {'id': 7865, 'synset': 'foxhole.n.01', 'name': 'foxhole'}, {'id': 7866, 'synset': 'fragmentation_bomb.n.01', 'name': 'fragmentation_bomb'}, {'id': 7867, 'synset': 'frail.n.02', 'name': 'frail'}, {'id': 7868, 'synset': 'fraise.n.02', 'name': 'fraise'}, {'id': 7869, 'synset': 'frame.n.10', 'name': 'frame'}, {'id': 7870, 'synset': 'frame.n.01', 'name': 'frame'}, {'id': 7871, 'synset': 'frame_buffer.n.01', 'name': 'frame_buffer'}, {'id': 7872, 'synset': 'framework.n.03', 'name': 'framework'}, {'id': 7873, 'synset': 'francis_turbine.n.01', 'name': 'Francis_turbine'}, {'id': 7874, 'synset': 'franking_machine.n.01', 'name': 'franking_machine'}, {'id': 7875, 'synset': 'free_house.n.01', 'name': 'free_house'}, {'id': 7876, 'synset': 'free-reed.n.01', 'name': 'free-reed'}, {'id': 7877, 'synset': 'free-reed_instrument.n.01', 'name': 'free-reed_instrument'}, {'id': 7878, 'synset': 'freewheel.n.01', 'name': 'freewheel'}, {'id': 7879, 'synset': 'freight_elevator.n.01', 'name': 'freight_elevator'}, {'id': 7880, 'synset': 'freight_liner.n.01', 'name': 'freight_liner'}, {'id': 7881, 'synset': 'freight_train.n.01', 'name': 'freight_train'}, {'id': 7882, 'synset': 'french_door.n.01', 'name': 'French_door'}, {'id': 7883, 'synset': 'french_horn.n.01', 'name': 'French_horn'}, {'id': 7884, 'synset': 'french_polish.n.02', 'name': 'French_polish'}, {'id': 7885, 'synset': 'french_roof.n.01', 'name': 'French_roof'}, {'id': 7886, 'synset': 'french_window.n.01', 'name': 'French_window'}, {'id': 7887, 'synset': 'fresnel_lens.n.01', 'name': 'Fresnel_lens'}, {'id': 7888, 'synset': 'fret.n.04', 'name': 'fret'}, {'id': 7889, 'synset': 'friary.n.01', 'name': 'friary'}, {'id': 7890, 'synset': 'friction_clutch.n.01', 'name': 'friction_clutch'}, {'id': 7891, 'synset': 'frieze.n.02', 'name': 'frieze'}, {'id': 7892, 'synset': 'frieze.n.01', 'name': 'frieze'}, {'id': 7893, 'synset': 'frigate.n.02', 'name': 'frigate'}, {'id': 7894, 'synset': 'frigate.n.01', 'name': 'frigate'}, {'id': 7895, 'synset': 'frill.n.03', 'name': 'frill'}, {'id': 7896, 'synset': 'frock.n.01', 'name': 'frock'}, {'id': 7897, 'synset': 'frock_coat.n.01', 'name': 'frock_coat'}, {'id': 7898, 'synset': 'frontlet.n.01', 'name': 'frontlet'}, {'id': 7899, 'synset': 'front_porch.n.01', 'name': 'front_porch'}, {'id': 7900, 'synset': 'front_projector.n.01', 'name': 'front_projector'}, {'id': 7901, 'synset': 'fruit_machine.n.01', 'name': 'fruit_machine'}, {'id': 7902, 'synset': 'fuel_filter.n.01', 'name': 'fuel_filter'}, {'id': 7903, 'synset': 'fuel_gauge.n.01', 'name': 'fuel_gauge'}, {'id': 7904, 'synset': 'fuel_injection.n.01', 'name': 'fuel_injection'}, {'id': 7905, 'synset': 'fuel_system.n.01', 'name': 'fuel_system'}, {'id': 7906, 'synset': 'full-dress_uniform.n.01', 'name': 'full-dress_uniform'}, {'id': 7907, 'synset': 'full_metal_jacket.n.01', 'name': 'full_metal_jacket'}, {'id': 7908, 'synset': 'full_skirt.n.01', 'name': 'full_skirt'}, {'id': 7909, 'synset': 'fumigator.n.02', 'name': 'fumigator'}, {'id': 7910, 'synset': 'funeral_home.n.01', 'name': 'funeral_home'}, {'id': 7911, 'synset': 'funny_wagon.n.01', 'name': 'funny_wagon'}, {'id': 7912, 'synset': 'fur.n.03', 'name': 'fur'}, {'id': 7913, 'synset': 'fur_coat.n.01', 'name': 'fur_coat'}, {'id': 7914, 'synset': 'fur_hat.n.01', 'name': 
'fur_hat'}, {'id': 7915, 'synset': 'furnace.n.01', 'name': 'furnace'}, {'id': 7916, 'synset': 'furnace_lining.n.01', 'name': 'furnace_lining'}, {'id': 7917, 'synset': 'furnace_room.n.01', 'name': 'furnace_room'}, {'id': 7918, 'synset': 'furnishing.n.02', 'name': 'furnishing'}, {'id': 7919, 'synset': 'furnishing.n.01', 'name': 'furnishing'}, {'id': 7920, 'synset': 'furniture.n.01', 'name': 'furniture'}, {'id': 7921, 'synset': 'fur-piece.n.01', 'name': 'fur-piece'}, {'id': 7922, 'synset': 'furrow.n.01', 'name': 'furrow'}, {'id': 7923, 'synset': 'fuse.n.01', 'name': 'fuse'}, {'id': 7924, 'synset': 'fusee_drive.n.01', 'name': 'fusee_drive'}, {'id': 7925, 'synset': 'fuselage.n.01', 'name': 'fuselage'}, {'id': 7926, 'synset': 'fusil.n.01', 'name': 'fusil'}, {'id': 7927, 'synset': 'fustian.n.02', 'name': 'fustian'}, {'id': 7928, 'synset': 'gabardine.n.01', 'name': 'gabardine'}, {'id': 7929, 'synset': 'gable.n.01', 'name': 'gable'}, {'id': 7930, 'synset': 'gable_roof.n.01', 'name': 'gable_roof'}, {'id': 7931, 'synset': 'gadgetry.n.01', 'name': 'gadgetry'}, {'id': 7932, 'synset': 'gaff.n.03', 'name': 'gaff'}, {'id': 7933, 'synset': 'gaff.n.02', 'name': 'gaff'}, {'id': 7934, 'synset': 'gaff.n.01', 'name': 'gaff'}, {'id': 7935, 'synset': 'gaffsail.n.01', 'name': 'gaffsail'}, {'id': 7936, 'synset': 'gaff_topsail.n.01', 'name': 'gaff_topsail'}, {'id': 7937, 'synset': 'gaiter.n.03', 'name': 'gaiter'}, {'id': 7938, 'synset': 'gaiter.n.02', 'name': 'gaiter'}, {'id': 7939, 'synset': 'galilean_telescope.n.01', 'name': 'Galilean_telescope'}, {'id': 7940, 'synset': 'galleon.n.01', 'name': 'galleon'}, {'id': 7941, 'synset': 'gallery.n.04', 'name': 'gallery'}, {'id': 7942, 'synset': 'gallery.n.03', 'name': 'gallery'}, {'id': 7943, 'synset': 'galley.n.04', 'name': 'galley'}, {'id': 7944, 'synset': 'galley.n.03', 'name': 'galley'}, {'id': 7945, 'synset': 'galley.n.02', 'name': 'galley'}, {'id': 7946, 'synset': 'gallows.n.01', 'name': 'gallows'}, {'id': 7947, 'synset': 'gallows_tree.n.01', 'name': 'gallows_tree'}, {'id': 7948, 'synset': 'galvanometer.n.01', 'name': 'galvanometer'}, {'id': 7949, 'synset': 'gambling_house.n.01', 'name': 'gambling_house'}, {'id': 7950, 'synset': 'gambrel.n.01', 'name': 'gambrel'}, {'id': 7951, 'synset': 'game.n.09', 'name': 'game'}, {'id': 7952, 'synset': 'gamebag.n.01', 'name': 'gamebag'}, {'id': 7953, 'synset': 'game_equipment.n.01', 'name': 'game_equipment'}, {'id': 7954, 'synset': 'gaming_table.n.01', 'name': 'gaming_table'}, {'id': 7955, 'synset': 'gamp.n.01', 'name': 'gamp'}, {'id': 7956, 'synset': 'gangplank.n.01', 'name': 'gangplank'}, {'id': 7957, 'synset': 'gangsaw.n.01', 'name': 'gangsaw'}, {'id': 7958, 'synset': 'gangway.n.01', 'name': 'gangway'}, {'id': 7959, 'synset': 'gantlet.n.04', 'name': 'gantlet'}, {'id': 7960, 'synset': 'gantry.n.01', 'name': 'gantry'}, {'id': 7961, 'synset': 'garage.n.01', 'name': 'garage'}, {'id': 7962, 'synset': 'garage.n.02', 'name': 'garage'}, {'id': 7963, 'synset': 'garand_rifle.n.01', 'name': 'Garand_rifle'}, {'id': 7964, 'synset': 'garboard.n.01', 'name': 'garboard'}, {'id': 7965, 'synset': 'garden.n.01', 'name': 'garden'}, {'id': 7966, 'synset': 'garden.n.03', 'name': 'garden'}, {'id': 7967, 'synset': 'garden_rake.n.01', 'name': 'garden_rake'}, {'id': 7968, 'synset': 'garden_spade.n.01', 'name': 'garden_spade'}, {'id': 7969, 'synset': 'garden_tool.n.01', 'name': 'garden_tool'}, {'id': 7970, 'synset': 'garden_trowel.n.01', 'name': 'garden_trowel'}, {'id': 7971, 'synset': 'gargoyle.n.01', 'name': 'gargoyle'}, {'id': 7972, 'synset': 
'garibaldi.n.02', 'name': 'garibaldi'}, {'id': 7973, 'synset': 'garlic_press.n.01', 'name': 'garlic_press'}, {'id': 7974, 'synset': 'garment.n.01', 'name': 'garment'}, {'id': 7975, 'synset': 'garment_bag.n.01', 'name': 'garment_bag'}, {'id': 7976, 'synset': 'garrison_cap.n.01', 'name': 'garrison_cap'}, {'id': 7977, 'synset': 'garrote.n.01', 'name': 'garrote'}, {'id': 7978, 'synset': 'garter.n.01', 'name': 'garter'}, {'id': 7979, 'synset': 'garter_belt.n.01', 'name': 'garter_belt'}, {'id': 7980, 'synset': 'garter_stitch.n.01', 'name': 'garter_stitch'}, {'id': 7981, 'synset': 'gas_guzzler.n.01', 'name': 'gas_guzzler'}, {'id': 7982, 'synset': 'gas_shell.n.01', 'name': 'gas_shell'}, {'id': 7983, 'synset': 'gas_bracket.n.01', 'name': 'gas_bracket'}, {'id': 7984, 'synset': 'gas_burner.n.01', 'name': 'gas_burner'}, {'id': 7985, 'synset': 'gas-cooled_reactor.n.01', 'name': 'gas-cooled_reactor'}, {'id': 7986, 'synset': 'gas-discharge_tube.n.01', 'name': 'gas-discharge_tube'}, {'id': 7987, 'synset': 'gas_engine.n.01', 'name': 'gas_engine'}, {'id': 7988, 'synset': 'gas_fixture.n.01', 'name': 'gas_fixture'}, {'id': 7989, 'synset': 'gas_furnace.n.01', 'name': 'gas_furnace'}, {'id': 7990, 'synset': 'gas_gun.n.01', 'name': 'gas_gun'}, {'id': 7991, 'synset': 'gas_heater.n.01', 'name': 'gas_heater'}, {'id': 7992, 'synset': 'gas_holder.n.01', 'name': 'gas_holder'}, {'id': 7993, 'synset': 'gasket.n.01', 'name': 'gasket'}, {'id': 7994, 'synset': 'gas_lamp.n.01', 'name': 'gas_lamp'}, {'id': 7995, 'synset': 'gas_maser.n.01', 'name': 'gas_maser'}, {'id': 7996, 'synset': 'gas_meter.n.01', 'name': 'gas_meter'}, {'id': 7997, 'synset': 'gasoline_engine.n.01', 'name': 'gasoline_engine'}, {'id': 7998, 'synset': 'gasoline_gauge.n.01', 'name': 'gasoline_gauge'}, {'id': 7999, 'synset': 'gas_oven.n.02', 'name': 'gas_oven'}, {'id': 8000, 'synset': 'gas_oven.n.01', 'name': 'gas_oven'}, {'id': 8001, 'synset': 'gas_pump.n.01', 'name': 'gas_pump'}, {'id': 8002, 'synset': 'gas_range.n.01', 'name': 'gas_range'}, {'id': 8003, 'synset': 'gas_ring.n.01', 'name': 'gas_ring'}, {'id': 8004, 'synset': 'gas_tank.n.01', 'name': 'gas_tank'}, {'id': 8005, 'synset': 'gas_thermometer.n.01', 'name': 'gas_thermometer'}, {'id': 8006, 'synset': 'gastroscope.n.01', 'name': 'gastroscope'}, {'id': 8007, 'synset': 'gas_turbine.n.01', 'name': 'gas_turbine'}, {'id': 8008, 'synset': 'gas-turbine_ship.n.01', 'name': 'gas-turbine_ship'}, {'id': 8009, 'synset': 'gat.n.01', 'name': 'gat'}, {'id': 8010, 'synset': 'gate.n.01', 'name': 'gate'}, {'id': 8011, 'synset': 'gatehouse.n.01', 'name': 'gatehouse'}, {'id': 8012, 'synset': 'gateleg_table.n.01', 'name': 'gateleg_table'}, {'id': 8013, 'synset': 'gatepost.n.01', 'name': 'gatepost'}, {'id': 8014, 'synset': 'gathered_skirt.n.01', 'name': 'gathered_skirt'}, {'id': 8015, 'synset': 'gatling_gun.n.01', 'name': 'Gatling_gun'}, {'id': 8016, 'synset': 'gauge.n.01', 'name': 'gauge'}, {'id': 8017, 'synset': 'gauntlet.n.03', 'name': 'gauntlet'}, {'id': 8018, 'synset': 'gauntlet.n.02', 'name': 'gauntlet'}, {'id': 8019, 'synset': 'gauze.n.02', 'name': 'gauze'}, {'id': 8020, 'synset': 'gauze.n.01', 'name': 'gauze'}, {'id': 8021, 'synset': 'gavel.n.01', 'name': 'gavel'}, {'id': 8022, 'synset': 'gazebo.n.01', 'name': 'gazebo'}, {'id': 8023, 'synset': 'gear.n.01', 'name': 'gear'}, {'id': 8024, 'synset': 'gear.n.04', 'name': 'gear'}, {'id': 8025, 'synset': 'gear.n.03', 'name': 'gear'}, {'id': 8026, 'synset': 'gearbox.n.01', 'name': 'gearbox'}, {'id': 8027, 'synset': 'gearing.n.01', 'name': 'gearing'}, {'id': 8028, 'synset': 
'gearset.n.01', 'name': 'gearset'}, {'id': 8029, 'synset': 'gearshift.n.01', 'name': 'gearshift'}, {'id': 8030, 'synset': 'geiger_counter.n.01', 'name': 'Geiger_counter'}, {'id': 8031, 'synset': 'geiger_tube.n.01', 'name': 'Geiger_tube'}, {'id': 8032, 'synset': 'gene_chip.n.01', 'name': 'gene_chip'}, {'id': 8033, 'synset': 'general-purpose_bomb.n.01', 'name': 'general-purpose_bomb'}, {'id': 8034, 'synset': 'generator.n.01', 'name': 'generator'}, {'id': 8035, 'synset': 'generator.n.04', 'name': 'generator'}, {'id': 8036, 'synset': 'geneva_gown.n.01', 'name': 'Geneva_gown'}, {'id': 8037, 'synset': 'geodesic_dome.n.01', 'name': 'geodesic_dome'}, {'id': 8038, 'synset': 'georgette.n.01', 'name': 'georgette'}, {'id': 8039, 'synset': 'gharry.n.01', 'name': 'gharry'}, {'id': 8040, 'synset': 'ghat.n.01', 'name': 'ghat'}, {'id': 8041, 'synset': 'ghetto_blaster.n.01', 'name': 'ghetto_blaster'}, {'id': 8042, 'synset': 'gift_shop.n.01', 'name': 'gift_shop'}, {'id': 8043, 'synset': 'gift_wrapping.n.01', 'name': 'gift_wrapping'}, {'id': 8044, 'synset': 'gig.n.05', 'name': 'gig'}, {'id': 8045, 'synset': 'gig.n.04', 'name': 'gig'}, {'id': 8046, 'synset': 'gig.n.01', 'name': 'gig'}, {'id': 8047, 'synset': 'gig.n.03', 'name': 'gig'}, {'id': 8048, 'synset': 'gildhall.n.01', 'name': 'gildhall'}, {'id': 8049, 'synset': 'gill_net.n.01', 'name': 'gill_net'}, {'id': 8050, 'synset': 'gilt.n.01', 'name': 'gilt'}, {'id': 8051, 'synset': 'gimbal.n.01', 'name': 'gimbal'}, {'id': 8052, 'synset': 'gingham.n.01', 'name': 'gingham'}, {'id': 8053, 'synset': 'girandole.n.01', 'name': 'girandole'}, {'id': 8054, 'synset': 'girder.n.01', 'name': 'girder'}, {'id': 8055, 'synset': 'glass.n.07', 'name': 'glass'}, {'id': 8056, 'synset': 'glass_cutter.n.03', 'name': 'glass_cutter'}, {'id': 8057, 'synset': 'glasses_case.n.01', 'name': 'glasses_case'}, {'id': 8058, 'synset': 'glebe_house.n.01', 'name': 'glebe_house'}, {'id': 8059, 'synset': 'glengarry.n.01', 'name': 'Glengarry'}, {'id': 8060, 'synset': 'glider.n.01', 'name': 'glider'}, {'id': 8061, 'synset': 'global_positioning_system.n.01', 'name': 'Global_Positioning_System'}, {'id': 8062, 'synset': 'glockenspiel.n.01', 'name': 'glockenspiel'}, {'id': 8063, 'synset': 'glory_hole.n.01', 'name': 'glory_hole'}, {'id': 8064, 'synset': 'glove_compartment.n.01', 'name': 'glove_compartment'}, {'id': 8065, 'synset': 'glow_lamp.n.01', 'name': 'glow_lamp'}, {'id': 8066, 'synset': 'glow_tube.n.01', 'name': 'glow_tube'}, {'id': 8067, 'synset': 'glyptic_art.n.01', 'name': 'glyptic_art'}, {'id': 8068, 'synset': 'glyptics.n.01', 'name': 'glyptics'}, {'id': 8069, 'synset': 'gnomon.n.01', 'name': 'gnomon'}, {'id': 8070, 'synset': 'goal.n.03', 'name': 'goal'}, {'id': 8071, 'synset': 'goalmouth.n.01', 'name': 'goalmouth'}, {'id': 8072, 'synset': 'goalpost.n.01', 'name': 'goalpost'}, {'id': 8073, 'synset': 'goblet.n.01', 'name': 'goblet'}, {'id': 8074, 'synset': 'godown.n.01', 'name': 'godown'}, {'id': 8075, 'synset': 'go-kart.n.01', 'name': 'go-kart'}, {'id': 8076, 'synset': 'gold_plate.n.02', 'name': 'gold_plate'}, {'id': 8077, 'synset': 'golf_bag.n.01', 'name': 'golf_bag'}, {'id': 8078, 'synset': 'golf_ball.n.01', 'name': 'golf_ball'}, {'id': 8079, 'synset': 'golf-club_head.n.01', 'name': 'golf-club_head'}, {'id': 8080, 'synset': 'golf_equipment.n.01', 'name': 'golf_equipment'}, {'id': 8081, 'synset': 'golf_glove.n.01', 'name': 'golf_glove'}, {'id': 8082, 'synset': 'golliwog.n.01', 'name': 'golliwog'}, {'id': 8083, 'synset': 'gong.n.01', 'name': 'gong'}, {'id': 8084, 'synset': 'goniometer.n.01', 
'name': 'goniometer'}, {'id': 8085, 'synset': 'gordian_knot.n.02', 'name': 'Gordian_knot'}, {'id': 8086, 'synset': 'gorget.n.01', 'name': 'gorget'}, {'id': 8087, 'synset': 'gossamer.n.01', 'name': 'gossamer'}, {'id': 8088, 'synset': 'gothic_arch.n.01', 'name': 'Gothic_arch'}, {'id': 8089, 'synset': 'gouache.n.01', 'name': 'gouache'}, {'id': 8090, 'synset': 'gouge.n.02', 'name': 'gouge'}, {'id': 8091, 'synset': 'gourd.n.01', 'name': 'gourd'}, {'id': 8092, 'synset': 'government_building.n.01', 'name': 'government_building'}, {'id': 8093, 'synset': 'government_office.n.01', 'name': 'government_office'}, {'id': 8094, 'synset': 'gown.n.01', 'name': 'gown'}, {'id': 8095, 'synset': 'gown.n.05', 'name': 'gown'}, {'id': 8096, 'synset': 'gown.n.04', 'name': 'gown'}, {'id': 8097, 'synset': 'grab.n.01', 'name': 'grab'}, {'id': 8098, 'synset': 'grab_bag.n.02', 'name': 'grab_bag'}, {'id': 8099, 'synset': 'grab_bar.n.01', 'name': 'grab_bar'}, {'id': 8100, 'synset': 'grace_cup.n.01', 'name': 'grace_cup'}, {'id': 8101, 'synset': 'grade_separation.n.01', 'name': 'grade_separation'}, {'id': 8102, 'synset': 'graduated_cylinder.n.01', 'name': 'graduated_cylinder'}, {'id': 8103, 'synset': 'graffito.n.01', 'name': 'graffito'}, {'id': 8104, 'synset': 'gramophone.n.01', 'name': 'gramophone'}, {'id': 8105, 'synset': 'granary.n.01', 'name': 'granary'}, {'id': 8106, 'synset': 'grandfather_clock.n.01', 'name': 'grandfather_clock'}, {'id': 8107, 'synset': 'grand_piano.n.01', 'name': 'grand_piano'}, {'id': 8108, 'synset': 'graniteware.n.01', 'name': 'graniteware'}, {'id': 8109, 'synset': 'granny_knot.n.01', 'name': 'granny_knot'}, {'id': 8110, 'synset': 'grape_arbor.n.01', 'name': 'grape_arbor'}, {'id': 8111, 'synset': 'grapnel.n.02', 'name': 'grapnel'}, {'id': 8112, 'synset': 'grapnel.n.01', 'name': 'grapnel'}, {'id': 8113, 'synset': 'grass_skirt.n.01', 'name': 'grass_skirt'}, {'id': 8114, 'synset': 'grate.n.01', 'name': 'grate'}, {'id': 8115, 'synset': 'grate.n.03', 'name': 'grate'}, {'id': 8116, 'synset': 'graver.n.01', 'name': 'graver'}, {'id': 8117, 'synset': 'gravimeter.n.02', 'name': 'gravimeter'}, {'id': 8118, 'synset': 'gravure.n.03', 'name': 'gravure'}, {'id': 8119, 'synset': 'grey.n.06', 'name': 'grey'}, {'id': 8120, 'synset': 'grease-gun.n.01', 'name': 'grease-gun'}, {'id': 8121, 'synset': 'greasepaint.n.01', 'name': 'greasepaint'}, {'id': 8122, 'synset': 'greasy_spoon.n.01', 'name': 'greasy_spoon'}, {'id': 8123, 'synset': 'greatcoat.n.01', 'name': 'greatcoat'}, {'id': 8124, 'synset': 'great_hall.n.01', 'name': 'great_hall'}, {'id': 8125, 'synset': 'greave.n.01', 'name': 'greave'}, {'id': 8126, 'synset': 'greengrocery.n.02', 'name': 'greengrocery'}, {'id': 8127, 'synset': 'greenhouse.n.01', 'name': 'greenhouse'}, {'id': 8128, 'synset': 'grenade.n.01', 'name': 'grenade'}, {'id': 8129, 'synset': 'grid.n.05', 'name': 'grid'}, {'id': 8130, 'synset': 'grille.n.02', 'name': 'grille'}, {'id': 8131, 'synset': 'grillroom.n.01', 'name': 'grillroom'}, {'id': 8132, 'synset': 'grinder.n.04', 'name': 'grinder'}, {'id': 8133, 'synset': 'grinding_wheel.n.01', 'name': 'grinding_wheel'}, {'id': 8134, 'synset': 'grindstone.n.01', 'name': 'grindstone'}, {'id': 8135, 'synset': 'gripsack.n.01', 'name': 'gripsack'}, {'id': 8136, 'synset': 'gristmill.n.01', 'name': 'gristmill'}, {'id': 8137, 'synset': 'grocery_store.n.01', 'name': 'grocery_store'}, {'id': 8138, 'synset': 'grogram.n.01', 'name': 'grogram'}, {'id': 8139, 'synset': 'groined_vault.n.01', 'name': 'groined_vault'}, {'id': 8140, 'synset': 'groover.n.01', 'name': 
'groover'}, {'id': 8141, 'synset': 'grosgrain.n.01', 'name': 'grosgrain'}, {'id': 8142, 'synset': 'gros_point.n.01', 'name': 'gros_point'}, {'id': 8143, 'synset': 'ground.n.09', 'name': 'ground'}, {'id': 8144, 'synset': 'ground_bait.n.01', 'name': 'ground_bait'}, {'id': 8145, 'synset': 'ground_control.n.01', 'name': 'ground_control'}, {'id': 8146, 'synset': 'ground_floor.n.01', 'name': 'ground_floor'}, {'id': 8147, 'synset': 'groundsheet.n.01', 'name': 'groundsheet'}, {'id': 8148, 'synset': 'g-string.n.01', 'name': 'G-string'}, {'id': 8149, 'synset': 'guard.n.03', 'name': 'guard'}, {'id': 8150, 'synset': 'guard_boat.n.01', 'name': 'guard_boat'}, {'id': 8151, 'synset': 'guardroom.n.02', 'name': 'guardroom'}, {'id': 8152, 'synset': 'guardroom.n.01', 'name': 'guardroom'}, {'id': 8153, 'synset': 'guard_ship.n.01', 'name': 'guard_ship'}, {'id': 8154, 'synset': "guard's_van.n.01", 'name': "guard's_van"}, {'id': 8155, 'synset': 'gueridon.n.01', 'name': 'gueridon'}, {'id': 8156, 'synset': 'guarnerius.n.03', 'name': 'Guarnerius'}, {'id': 8157, 'synset': 'guesthouse.n.01', 'name': 'guesthouse'}, {'id': 8158, 'synset': 'guestroom.n.01', 'name': 'guestroom'}, {'id': 8159, 'synset': 'guidance_system.n.01', 'name': 'guidance_system'}, {'id': 8160, 'synset': 'guided_missile.n.01', 'name': 'guided_missile'}, {'id': 8161, 'synset': 'guided_missile_cruiser.n.01', 'name': 'guided_missile_cruiser'}, {'id': 8162, 'synset': 'guided_missile_frigate.n.01', 'name': 'guided_missile_frigate'}, {'id': 8163, 'synset': 'guildhall.n.01', 'name': 'guildhall'}, {'id': 8164, 'synset': 'guilloche.n.01', 'name': 'guilloche'}, {'id': 8165, 'synset': 'guillotine.n.02', 'name': 'guillotine'}, {'id': 8166, 'synset': 'guimpe.n.02', 'name': 'guimpe'}, {'id': 8167, 'synset': 'guimpe.n.01', 'name': 'guimpe'}, {'id': 8168, 'synset': 'guitar_pick.n.01', 'name': 'guitar_pick'}, {'id': 8169, 'synset': 'gulag.n.01', 'name': 'gulag'}, {'id': 8170, 'synset': 'gunboat.n.01', 'name': 'gunboat'}, {'id': 8171, 'synset': 'gun_carriage.n.01', 'name': 'gun_carriage'}, {'id': 8172, 'synset': 'gun_case.n.01', 'name': 'gun_case'}, {'id': 8173, 'synset': 'gun_emplacement.n.01', 'name': 'gun_emplacement'}, {'id': 8174, 'synset': 'gun_enclosure.n.01', 'name': 'gun_enclosure'}, {'id': 8175, 'synset': 'gunlock.n.01', 'name': 'gunlock'}, {'id': 8176, 'synset': 'gunnery.n.01', 'name': 'gunnery'}, {'id': 8177, 'synset': 'gunnysack.n.01', 'name': 'gunnysack'}, {'id': 8178, 'synset': 'gun_pendulum.n.01', 'name': 'gun_pendulum'}, {'id': 8179, 'synset': 'gun_room.n.01', 'name': 'gun_room'}, {'id': 8180, 'synset': 'gunsight.n.01', 'name': 'gunsight'}, {'id': 8181, 'synset': 'gun_trigger.n.01', 'name': 'gun_trigger'}, {'id': 8182, 'synset': 'gurney.n.01', 'name': 'gurney'}, {'id': 8183, 'synset': 'gusher.n.01', 'name': 'gusher'}, {'id': 8184, 'synset': 'gusset.n.03', 'name': 'gusset'}, {'id': 8185, 'synset': 'gusset.n.02', 'name': 'gusset'}, {'id': 8186, 'synset': 'guy.n.03', 'name': 'guy'}, {'id': 8187, 'synset': 'gymnastic_apparatus.n.01', 'name': 'gymnastic_apparatus'}, {'id': 8188, 'synset': 'gym_shoe.n.01', 'name': 'gym_shoe'}, {'id': 8189, 'synset': 'gym_suit.n.01', 'name': 'gym_suit'}, {'id': 8190, 'synset': 'gymslip.n.01', 'name': 'gymslip'}, {'id': 8191, 'synset': 'gypsy_cab.n.01', 'name': 'gypsy_cab'}, {'id': 8192, 'synset': 'gyrocompass.n.01', 'name': 'gyrocompass'}, {'id': 8193, 'synset': 'gyroscope.n.01', 'name': 'gyroscope'}, {'id': 8194, 'synset': 'gyrostabilizer.n.01', 'name': 'gyrostabilizer'}, {'id': 8195, 'synset': 'habergeon.n.01', 'name': 
'habergeon'}, {'id': 8196, 'synset': 'habit.n.03', 'name': 'habit'}, {'id': 8197, 'synset': 'habit.n.05', 'name': 'habit'}, {'id': 8198, 'synset': 'hacienda.n.02', 'name': 'hacienda'}, {'id': 8199, 'synset': 'hacksaw.n.01', 'name': 'hacksaw'}, {'id': 8200, 'synset': 'haft.n.01', 'name': 'haft'}, {'id': 8201, 'synset': 'haircloth.n.01', 'name': 'haircloth'}, {'id': 8202, 'synset': 'hairdressing.n.01', 'name': 'hairdressing'}, {'id': 8203, 'synset': 'hairpiece.n.01', 'name': 'hairpiece'}, {'id': 8204, 'synset': 'hair_shirt.n.01', 'name': 'hair_shirt'}, {'id': 8205, 'synset': 'hair_slide.n.01', 'name': 'hair_slide'}, {'id': 8206, 'synset': 'hair_spray.n.01', 'name': 'hair_spray'}, {'id': 8207, 'synset': 'hairspring.n.01', 'name': 'hairspring'}, {'id': 8208, 'synset': 'hair_trigger.n.01', 'name': 'hair_trigger'}, {'id': 8209, 'synset': 'halberd.n.01', 'name': 'halberd'}, {'id': 8210, 'synset': 'half_binding.n.01', 'name': 'half_binding'}, {'id': 8211, 'synset': 'half_hatchet.n.01', 'name': 'half_hatchet'}, {'id': 8212, 'synset': 'half_hitch.n.01', 'name': 'half_hitch'}, {'id': 8213, 'synset': 'half_track.n.01', 'name': 'half_track'}, {'id': 8214, 'synset': 'hall.n.13', 'name': 'hall'}, {'id': 8215, 'synset': 'hall.n.03', 'name': 'hall'}, {'id': 8216, 'synset': 'hall.n.12', 'name': 'hall'}, {'id': 8217, 'synset': 'hall_of_fame.n.01', 'name': 'Hall_of_Fame'}, {'id': 8218, 'synset': 'hall_of_residence.n.01', 'name': 'hall_of_residence'}, {'id': 8219, 'synset': 'hallstand.n.01', 'name': 'hallstand'}, {'id': 8220, 'synset': 'halter.n.01', 'name': 'halter'}, {'id': 8221, 'synset': 'hame.n.01', 'name': 'hame'}, {'id': 8222, 'synset': 'hammer.n.07', 'name': 'hammer'}, {'id': 8223, 'synset': 'hammer.n.05', 'name': 'hammer'}, {'id': 8224, 'synset': 'hammerhead.n.02', 'name': 'hammerhead'}, {'id': 8225, 'synset': 'hand.n.08', 'name': 'hand'}, {'id': 8226, 'synset': 'handball.n.01', 'name': 'handball'}, {'id': 8227, 'synset': 'handbarrow.n.01', 'name': 'handbarrow'}, {'id': 8228, 'synset': 'handbell.n.01', 'name': 'handbell'}, {'id': 8229, 'synset': 'handbow.n.01', 'name': 'handbow'}, {'id': 8230, 'synset': 'hand_brake.n.01', 'name': 'hand_brake'}, {'id': 8231, 'synset': 'hand_calculator.n.01', 'name': 'hand_calculator'}, {'id': 8232, 'synset': 'handcar.n.01', 'name': 'handcar'}, {'id': 8233, 'synset': 'hand_cream.n.01', 'name': 'hand_cream'}, {'id': 8234, 'synset': 'hand_drill.n.01', 'name': 'hand_drill'}, {'id': 8235, 'synset': 'hand_glass.n.02', 'name': 'hand_glass'}, {'id': 8236, 'synset': 'hand_grenade.n.01', 'name': 'hand_grenade'}, {'id': 8237, 'synset': 'hand-held_computer.n.01', 'name': 'hand-held_computer'}, {'id': 8238, 'synset': 'handhold.n.01', 'name': 'handhold'}, {'id': 8239, 'synset': 'handlebar.n.01', 'name': 'handlebar'}, {'id': 8240, 'synset': 'handloom.n.01', 'name': 'handloom'}, {'id': 8241, 'synset': 'hand_lotion.n.01', 'name': 'hand_lotion'}, {'id': 8242, 'synset': 'hand_luggage.n.01', 'name': 'hand_luggage'}, {'id': 8243, 'synset': 'hand-me-down.n.01', 'name': 'hand-me-down'}, {'id': 8244, 'synset': 'hand_mower.n.01', 'name': 'hand_mower'}, {'id': 8245, 'synset': 'hand_pump.n.01', 'name': 'hand_pump'}, {'id': 8246, 'synset': 'handrest.n.01', 'name': 'handrest'}, {'id': 8247, 'synset': 'handset.n.01', 'name': 'handset'}, {'id': 8248, 'synset': 'hand_shovel.n.01', 'name': 'hand_shovel'}, {'id': 8249, 'synset': 'handspike.n.01', 'name': 'handspike'}, {'id': 8250, 'synset': 'handstamp.n.01', 'name': 'handstamp'}, {'id': 8251, 'synset': 'hand_throttle.n.01', 'name': 'hand_throttle'}, 
{'id': 8252, 'synset': 'hand_tool.n.01', 'name': 'hand_tool'}, {'id': 8253, 'synset': 'hand_truck.n.01', 'name': 'hand_truck'}, {'id': 8254, 'synset': 'handwear.n.01', 'name': 'handwear'}, {'id': 8255, 'synset': 'handwheel.n.02', 'name': 'handwheel'}, {'id': 8256, 'synset': 'handwheel.n.01', 'name': 'handwheel'}, {'id': 8257, 'synset': 'hangar_queen.n.01', 'name': 'hangar_queen'}, {'id': 8258, 'synset': 'hanger.n.02', 'name': 'hanger'}, {'id': 8259, 'synset': 'hang_glider.n.02', 'name': 'hang_glider'}, {'id': 8260, 'synset': "hangman's_rope.n.01", 'name': "hangman's_rope"}, {'id': 8261, 'synset': 'hank.n.01', 'name': 'hank'}, {'id': 8262, 'synset': 'hansom.n.01', 'name': 'hansom'}, {'id': 8263, 'synset': 'harbor.n.02', 'name': 'harbor'}, {'id': 8264, 'synset': 'hard_disc.n.01', 'name': 'hard_disc'}, {'id': 8265, 'synset': 'hard_hat.n.02', 'name': 'hard_hat'}, {'id': 8266, 'synset': 'hardtop.n.01', 'name': 'hardtop'}, {'id': 8267, 'synset': 'hardware.n.02', 'name': 'hardware'}, {'id': 8268, 'synset': 'hardware_store.n.01', 'name': 'hardware_store'}, {'id': 8269, 'synset': 'harmonica.n.01', 'name': 'harmonica'}, {'id': 8270, 'synset': 'harness.n.02', 'name': 'harness'}, {'id': 8271, 'synset': 'harness.n.01', 'name': 'harness'}, {'id': 8272, 'synset': 'harp.n.01', 'name': 'harp'}, {'id': 8273, 'synset': 'harp.n.02', 'name': 'harp'}, {'id': 8274, 'synset': 'harpoon.n.01', 'name': 'harpoon'}, {'id': 8275, 'synset': 'harpoon_gun.n.01', 'name': 'harpoon_gun'}, {'id': 8276, 'synset': 'harpoon_log.n.01', 'name': 'harpoon_log'}, {'id': 8277, 'synset': 'harpsichord.n.01', 'name': 'harpsichord'}, {'id': 8278, 'synset': 'harris_tweed.n.01', 'name': 'Harris_Tweed'}, {'id': 8279, 'synset': 'harrow.n.01', 'name': 'harrow'}, {'id': 8280, 'synset': 'harvester.n.02', 'name': 'harvester'}, {'id': 8281, 'synset': 'hash_house.n.01', 'name': 'hash_house'}, {'id': 8282, 'synset': 'hasp.n.01', 'name': 'hasp'}, {'id': 8283, 'synset': 'hatch.n.03', 'name': 'hatch'}, {'id': 8284, 'synset': 'hatchback.n.02', 'name': 'hatchback'}, {'id': 8285, 'synset': 'hatchback.n.01', 'name': 'hatchback'}, {'id': 8286, 'synset': 'hatchel.n.01', 'name': 'hatchel'}, {'id': 8287, 'synset': 'hatchet.n.02', 'name': 'hatchet'}, {'id': 8288, 'synset': 'hatpin.n.01', 'name': 'hatpin'}, {'id': 8289, 'synset': 'hauberk.n.01', 'name': 'hauberk'}, {'id': 8290, 'synset': 'hawaiian_guitar.n.01', 'name': 'Hawaiian_guitar'}, {'id': 8291, 'synset': 'hawse.n.01', 'name': 'hawse'}, {'id': 8292, 'synset': 'hawser.n.01', 'name': 'hawser'}, {'id': 8293, 'synset': 'hawser_bend.n.01', 'name': 'hawser_bend'}, {'id': 8294, 'synset': 'hay_bale.n.01', 'name': 'hay_bale'}, {'id': 8295, 'synset': 'hayfork.n.01', 'name': 'hayfork'}, {'id': 8296, 'synset': 'hayloft.n.01', 'name': 'hayloft'}, {'id': 8297, 'synset': 'haymaker.n.01', 'name': 'haymaker'}, {'id': 8298, 'synset': 'hayrack.n.02', 'name': 'hayrack'}, {'id': 8299, 'synset': 'hayrack.n.01', 'name': 'hayrack'}, {'id': 8300, 'synset': 'hazard.n.03', 'name': 'hazard'}, {'id': 8301, 'synset': 'head.n.31', 'name': 'head'}, {'id': 8302, 'synset': 'head.n.30', 'name': 'head'}, {'id': 8303, 'synset': 'head.n.29', 'name': 'head'}, {'id': 8304, 'synset': 'headdress.n.01', 'name': 'headdress'}, {'id': 8305, 'synset': 'header.n.05', 'name': 'header'}, {'id': 8306, 'synset': 'header.n.04', 'name': 'header'}, {'id': 8307, 'synset': 'header.n.03', 'name': 'header'}, {'id': 8308, 'synset': 'header.n.02', 'name': 'header'}, {'id': 8309, 'synset': 'headfast.n.01', 'name': 'headfast'}, {'id': 8310, 'synset': 
'head_gasket.n.01', 'name': 'head_gasket'}, {'id': 8311, 'synset': 'head_gate.n.02', 'name': 'head_gate'}, {'id': 8312, 'synset': 'headgear.n.03', 'name': 'headgear'}, {'id': 8313, 'synset': 'headpiece.n.02', 'name': 'headpiece'}, {'id': 8314, 'synset': 'headpin.n.01', 'name': 'headpin'}, {'id': 8315, 'synset': 'headquarters.n.01', 'name': 'headquarters'}, {'id': 8316, 'synset': 'headrace.n.01', 'name': 'headrace'}, {'id': 8317, 'synset': 'headrest.n.02', 'name': 'headrest'}, {'id': 8318, 'synset': 'headsail.n.01', 'name': 'headsail'}, {'id': 8319, 'synset': 'head_shop.n.01', 'name': 'head_shop'}, {'id': 8320, 'synset': 'headstock.n.01', 'name': 'headstock'}, {'id': 8321, 'synset': 'health_spa.n.01', 'name': 'health_spa'}, {'id': 8322, 'synset': 'hearing_aid.n.02', 'name': 'hearing_aid'}, {'id': 8323, 'synset': 'hearing_aid.n.01', 'name': 'hearing_aid'}, {'id': 8324, 'synset': 'hearse.n.01', 'name': 'hearse'}, {'id': 8325, 'synset': 'hearth.n.02', 'name': 'hearth'}, {'id': 8326, 'synset': 'hearthrug.n.01', 'name': 'hearthrug'}, {'id': 8327, 'synset': 'heart-lung_machine.n.01', 'name': 'heart-lung_machine'}, {'id': 8328, 'synset': 'heat_engine.n.01', 'name': 'heat_engine'}, {'id': 8329, 'synset': 'heat_exchanger.n.01', 'name': 'heat_exchanger'}, {'id': 8330, 'synset': 'heating_pad.n.01', 'name': 'heating_pad'}, {'id': 8331, 'synset': 'heat_lamp.n.01', 'name': 'heat_lamp'}, {'id': 8332, 'synset': 'heat_pump.n.01', 'name': 'heat_pump'}, {'id': 8333, 'synset': 'heat-seeking_missile.n.01', 'name': 'heat-seeking_missile'}, {'id': 8334, 'synset': 'heat_shield.n.01', 'name': 'heat_shield'}, {'id': 8335, 'synset': 'heat_sink.n.01', 'name': 'heat_sink'}, {'id': 8336, 'synset': 'heaume.n.01', 'name': 'heaume'}, {'id': 8337, 'synset': 'heaver.n.01', 'name': 'heaver'}, {'id': 8338, 'synset': 'heavier-than-air_craft.n.01', 'name': 'heavier-than-air_craft'}, {'id': 8339, 'synset': 'heckelphone.n.01', 'name': 'heckelphone'}, {'id': 8340, 'synset': 'hectograph.n.01', 'name': 'hectograph'}, {'id': 8341, 'synset': 'hedge.n.01', 'name': 'hedge'}, {'id': 8342, 'synset': 'hedge_trimmer.n.01', 'name': 'hedge_trimmer'}, {'id': 8343, 'synset': 'helicon.n.01', 'name': 'helicon'}, {'id': 8344, 'synset': 'heliograph.n.01', 'name': 'heliograph'}, {'id': 8345, 'synset': 'heliometer.n.01', 'name': 'heliometer'}, {'id': 8346, 'synset': 'helm.n.01', 'name': 'helm'}, {'id': 8347, 'synset': 'helmet.n.01', 'name': 'helmet'}, {'id': 8348, 'synset': 'hematocrit.n.02', 'name': 'hematocrit'}, {'id': 8349, 'synset': 'hemming-stitch.n.01', 'name': 'hemming-stitch'}, {'id': 8350, 'synset': 'hemostat.n.01', 'name': 'hemostat'}, {'id': 8351, 'synset': 'hemstitch.n.01', 'name': 'hemstitch'}, {'id': 8352, 'synset': 'henroost.n.01', 'name': 'henroost'}, {'id': 8353, 'synset': 'heraldry.n.02', 'name': 'heraldry'}, {'id': 8354, 'synset': 'hermitage.n.01', 'name': 'hermitage'}, {'id': 8355, 'synset': 'herringbone.n.01', 'name': 'herringbone'}, {'id': 8356, 'synset': 'herringbone.n.02', 'name': 'herringbone'}, {'id': 8357, 'synset': 'herschelian_telescope.n.01', 'name': 'Herschelian_telescope'}, {'id': 8358, 'synset': 'hessian_boot.n.01', 'name': 'Hessian_boot'}, {'id': 8359, 'synset': 'heterodyne_receiver.n.01', 'name': 'heterodyne_receiver'}, {'id': 8360, 'synset': 'hibachi.n.01', 'name': 'hibachi'}, {'id': 8361, 'synset': 'hideaway.n.02', 'name': 'hideaway'}, {'id': 8362, 'synset': 'hi-fi.n.01', 'name': 'hi-fi'}, {'id': 8363, 'synset': 'high_altar.n.01', 'name': 'high_altar'}, {'id': 8364, 'synset': 'high-angle_gun.n.01', 'name': 
'high-angle_gun'}, {'id': 8365, 'synset': 'highball_glass.n.01', 'name': 'highball_glass'}, {'id': 8366, 'synset': 'highboard.n.01', 'name': 'highboard'}, {'id': 8367, 'synset': 'highboy.n.01', 'name': 'highboy'}, {'id': 8368, 'synset': 'high_gear.n.01', 'name': 'high_gear'}, {'id': 8369, 'synset': 'high-hat_cymbal.n.01', 'name': 'high-hat_cymbal'}, {'id': 8370, 'synset': 'highlighter.n.02', 'name': 'highlighter'}, {'id': 8371, 'synset': 'highlighter.n.01', 'name': 'highlighter'}, {'id': 8372, 'synset': 'high-pass_filter.n.01', 'name': 'high-pass_filter'}, {'id': 8373, 'synset': 'high-rise.n.01', 'name': 'high-rise'}, {'id': 8374, 'synset': 'high_table.n.01', 'name': 'high_table'}, {'id': 8375, 'synset': 'high-warp_loom.n.01', 'name': 'high-warp_loom'}, {'id': 8376, 'synset': 'hijab.n.01', 'name': 'hijab'}, {'id': 8377, 'synset': 'hinging_post.n.01', 'name': 'hinging_post'}, {'id': 8378, 'synset': 'hip_boot.n.01', 'name': 'hip_boot'}, {'id': 8379, 'synset': 'hipflask.n.01', 'name': 'hipflask'}, {'id': 8380, 'synset': 'hip_pad.n.01', 'name': 'hip_pad'}, {'id': 8381, 'synset': 'hip_pocket.n.01', 'name': 'hip_pocket'}, {'id': 8382, 'synset': 'hippodrome.n.01', 'name': 'hippodrome'}, {'id': 8383, 'synset': 'hip_roof.n.01', 'name': 'hip_roof'}, {'id': 8384, 'synset': 'hitch.n.05', 'name': 'hitch'}, {'id': 8385, 'synset': 'hitch.n.04', 'name': 'hitch'}, {'id': 8386, 'synset': 'hitching_post.n.01', 'name': 'hitching_post'}, {'id': 8387, 'synset': 'hitchrack.n.01', 'name': 'hitchrack'}, {'id': 8388, 'synset': 'hob.n.03', 'name': 'hob'}, {'id': 8389, 'synset': 'hobble_skirt.n.01', 'name': 'hobble_skirt'}, {'id': 8390, 'synset': 'hockey_skate.n.01', 'name': 'hockey_skate'}, {'id': 8391, 'synset': 'hod.n.01', 'name': 'hod'}, {'id': 8392, 'synset': 'hodoscope.n.01', 'name': 'hodoscope'}, {'id': 8393, 'synset': 'hoe.n.01', 'name': 'hoe'}, {'id': 8394, 'synset': 'hoe_handle.n.01', 'name': 'hoe_handle'}, {'id': 8395, 'synset': 'hogshead.n.02', 'name': 'hogshead'}, {'id': 8396, 'synset': 'hoist.n.01', 'name': 'hoist'}, {'id': 8397, 'synset': 'hold.n.07', 'name': 'hold'}, {'id': 8398, 'synset': 'holder.n.01', 'name': 'holder'}, {'id': 8399, 'synset': 'holding_cell.n.01', 'name': 'holding_cell'}, {'id': 8400, 'synset': 'holding_device.n.01', 'name': 'holding_device'}, {'id': 8401, 'synset': 'holding_pen.n.01', 'name': 'holding_pen'}, {'id': 8402, 'synset': 'hollowware.n.01', 'name': 'hollowware'}, {'id': 8403, 'synset': 'holster.n.01', 'name': 'holster'}, {'id': 8404, 'synset': 'holster.n.02', 'name': 'holster'}, {'id': 8405, 'synset': 'holy_of_holies.n.02', 'name': 'holy_of_holies'}, {'id': 8406, 'synset': 'home.n.09', 'name': 'home'}, {'id': 8407, 'synset': 'home_appliance.n.01', 'name': 'home_appliance'}, {'id': 8408, 'synset': 'home_computer.n.01', 'name': 'home_computer'}, {'id': 8409, 'synset': 'home_room.n.01', 'name': 'home_room'}, {'id': 8410, 'synset': 'homespun.n.01', 'name': 'homespun'}, {'id': 8411, 'synset': 'homestead.n.03', 'name': 'homestead'}, {'id': 8412, 'synset': 'home_theater.n.01', 'name': 'home_theater'}, {'id': 8413, 'synset': 'homing_torpedo.n.01', 'name': 'homing_torpedo'}, {'id': 8414, 'synset': 'hone.n.01', 'name': 'hone'}, {'id': 8415, 'synset': 'honeycomb.n.02', 'name': 'honeycomb'}, {'id': 8416, 'synset': 'hood.n.09', 'name': 'hood'}, {'id': 8417, 'synset': 'hood.n.08', 'name': 'hood'}, {'id': 8418, 'synset': 'hood.n.07', 'name': 'hood'}, {'id': 8419, 'synset': 'hood.n.05', 'name': 'hood'}, {'id': 8420, 'synset': 'hood_latch.n.01', 'name': 'hood_latch'}, {'id': 8421, 
'synset': 'hook.n.04', 'name': 'hook'}, {'id': 8422, 'synset': 'hook.n.01', 'name': 'hook'}, {'id': 8423, 'synset': 'hook_and_eye.n.01', 'name': 'hook_and_eye'}, {'id': 8424, 'synset': 'hookup.n.02', 'name': 'hookup'}, {'id': 8425, 'synset': 'hookup.n.01', 'name': 'hookup'}, {'id': 8426, 'synset': 'hook_wrench.n.01', 'name': 'hook_wrench'}, {'id': 8427, 'synset': 'hoopskirt.n.01', 'name': 'hoopskirt'}, {'id': 8428, 'synset': 'hoosegow.n.01', 'name': 'hoosegow'}, {'id': 8429, 'synset': 'hoover.n.04', 'name': 'Hoover'}, {'id': 8430, 'synset': 'hope_chest.n.01', 'name': 'hope_chest'}, {'id': 8431, 'synset': 'hopper.n.01', 'name': 'hopper'}, {'id': 8432, 'synset': 'hopsacking.n.01', 'name': 'hopsacking'}, {'id': 8433, 'synset': 'horizontal_bar.n.01', 'name': 'horizontal_bar'}, {'id': 8434, 'synset': 'horizontal_stabilizer.n.01', 'name': 'horizontal_stabilizer'}, {'id': 8435, 'synset': 'horizontal_tail.n.01', 'name': 'horizontal_tail'}, {'id': 8436, 'synset': 'horn.n.09', 'name': 'horn'}, {'id': 8437, 'synset': 'horn.n.01', 'name': 'horn'}, {'id': 8438, 'synset': 'horn.n.08', 'name': 'horn'}, {'id': 8439, 'synset': 'horn_button.n.01', 'name': 'horn_button'}, {'id': 8440, 'synset': 'hornpipe.n.03', 'name': 'hornpipe'}, {'id': 8441, 'synset': 'horse.n.02', 'name': 'horse'}, {'id': 8442, 'synset': 'horsebox.n.01', 'name': 'horsebox'}, {'id': 8443, 'synset': 'horsecar.n.01', 'name': 'horsecar'}, {'id': 8444, 'synset': 'horse_cart.n.01', 'name': 'horse_cart'}, {'id': 8445, 'synset': 'horsecloth.n.01', 'name': 'horsecloth'}, {'id': 8446, 'synset': 'horse-drawn_vehicle.n.01', 'name': 'horse-drawn_vehicle'}, {'id': 8447, 'synset': 'horsehair.n.02', 'name': 'horsehair'}, {'id': 8448, 'synset': 'horsehair_wig.n.01', 'name': 'horsehair_wig'}, {'id': 8449, 'synset': 'horseless_carriage.n.01', 'name': 'horseless_carriage'}, {'id': 8450, 'synset': 'horse_pistol.n.01', 'name': 'horse_pistol'}, {'id': 8451, 'synset': 'horseshoe.n.02', 'name': 'horseshoe'}, {'id': 8452, 'synset': 'horseshoe.n.01', 'name': 'horseshoe'}, {'id': 8453, 'synset': 'horse-trail.n.01', 'name': 'horse-trail'}, {'id': 8454, 'synset': 'horsewhip.n.01', 'name': 'horsewhip'}, {'id': 8455, 'synset': 'hose.n.02', 'name': 'hose'}, {'id': 8456, 'synset': 'hosiery.n.01', 'name': 'hosiery'}, {'id': 8457, 'synset': 'hospice.n.01', 'name': 'hospice'}, {'id': 8458, 'synset': 'hospital.n.01', 'name': 'hospital'}, {'id': 8459, 'synset': 'hospital_bed.n.01', 'name': 'hospital_bed'}, {'id': 8460, 'synset': 'hospital_room.n.01', 'name': 'hospital_room'}, {'id': 8461, 'synset': 'hospital_ship.n.01', 'name': 'hospital_ship'}, {'id': 8462, 'synset': 'hospital_train.n.01', 'name': 'hospital_train'}, {'id': 8463, 'synset': 'hostel.n.02', 'name': 'hostel'}, {'id': 8464, 'synset': 'hostel.n.01', 'name': 'hostel'}, {'id': 8465, 'synset': 'hotel.n.01', 'name': 'hotel'}, {'id': 8466, 'synset': 'hotel-casino.n.02', 'name': 'hotel-casino'}, {'id': 8467, 'synset': 'hotel-casino.n.01', 'name': 'hotel-casino'}, {'id': 8468, 'synset': 'hotel_room.n.01', 'name': 'hotel_room'}, {'id': 8469, 'synset': 'hot_line.n.01', 'name': 'hot_line'}, {'id': 8470, 'synset': 'hot_pants.n.02', 'name': 'hot_pants'}, {'id': 8471, 'synset': 'hot_rod.n.01', 'name': 'hot_rod'}, {'id': 8472, 'synset': 'hot_spot.n.03', 'name': 'hot_spot'}, {'id': 8473, 'synset': 'hot_tub.n.01', 'name': 'hot_tub'}, {'id': 8474, 'synset': 'hot-water_bottle.n.01', 'name': 'hot-water_bottle'}, {'id': 8475, 'synset': 'houndstooth_check.n.01', 'name': 'houndstooth_check'}, {'id': 8476, 'synset': 'hour_hand.n.01', 
'name': 'hour_hand'}, {'id': 8477, 'synset': 'house.n.01', 'name': 'house'}, {'id': 8478, 'synset': 'house.n.12', 'name': 'house'}, {'id': 8479, 'synset': 'houselights.n.01', 'name': 'houselights'}, {'id': 8480, 'synset': 'house_of_cards.n.02', 'name': 'house_of_cards'}, {'id': 8481, 'synset': 'house_of_correction.n.01', 'name': 'house_of_correction'}, {'id': 8482, 'synset': 'house_paint.n.01', 'name': 'house_paint'}, {'id': 8483, 'synset': 'housetop.n.01', 'name': 'housetop'}, {'id': 8484, 'synset': 'housing.n.01', 'name': 'housing'}, {'id': 8485, 'synset': 'hovel.n.01', 'name': 'hovel'}, {'id': 8486, 'synset': 'hovercraft.n.01', 'name': 'hovercraft'}, {'id': 8487, 'synset': 'howdah.n.01', 'name': 'howdah'}, {'id': 8488, 'synset': 'huarache.n.01', 'name': 'huarache'}, {'id': 8489, 'synset': 'hub-and-spoke.n.01', 'name': 'hub-and-spoke'}, {'id': 8490, 'synset': 'hubcap.n.01', 'name': 'hubcap'}, {'id': 8491, 'synset': 'huck.n.01', 'name': 'huck'}, {'id': 8492, 'synset': 'hug-me-tight.n.01', 'name': 'hug-me-tight'}, {'id': 8493, 'synset': 'hula-hoop.n.01', 'name': 'hula-hoop'}, {'id': 8494, 'synset': 'hulk.n.02', 'name': 'hulk'}, {'id': 8495, 'synset': 'hull.n.06', 'name': 'hull'}, {'id': 8496, 'synset': 'humeral_veil.n.01', 'name': 'humeral_veil'}, {'id': 8497, 'synset': 'humvee.n.01', 'name': 'Humvee'}, {'id': 8498, 'synset': 'hunter.n.04', 'name': 'hunter'}, {'id': 8499, 'synset': 'hunting_knife.n.01', 'name': 'hunting_knife'}, {'id': 8500, 'synset': 'hurdle.n.01', 'name': 'hurdle'}, {'id': 8501, 'synset': 'hurricane_deck.n.01', 'name': 'hurricane_deck'}, {'id': 8502, 'synset': 'hurricane_lamp.n.01', 'name': 'hurricane_lamp'}, {'id': 8503, 'synset': 'hut.n.01', 'name': 'hut'}, {'id': 8504, 'synset': 'hutch.n.01', 'name': 'hutch'}, {'id': 8505, 'synset': 'hutment.n.01', 'name': 'hutment'}, {'id': 8506, 'synset': 'hydraulic_brake.n.01', 'name': 'hydraulic_brake'}, {'id': 8507, 'synset': 'hydraulic_press.n.01', 'name': 'hydraulic_press'}, {'id': 8508, 'synset': 'hydraulic_pump.n.01', 'name': 'hydraulic_pump'}, {'id': 8509, 'synset': 'hydraulic_system.n.01', 'name': 'hydraulic_system'}, {'id': 8510, 'synset': 'hydraulic_transmission.n.01', 'name': 'hydraulic_transmission'}, {'id': 8511, 'synset': 'hydroelectric_turbine.n.01', 'name': 'hydroelectric_turbine'}, {'id': 8512, 'synset': 'hydrofoil.n.02', 'name': 'hydrofoil'}, {'id': 8513, 'synset': 'hydrofoil.n.01', 'name': 'hydrofoil'}, {'id': 8514, 'synset': 'hydrogen_bomb.n.01', 'name': 'hydrogen_bomb'}, {'id': 8515, 'synset': 'hydrometer.n.01', 'name': 'hydrometer'}, {'id': 8516, 'synset': 'hygrodeik.n.01', 'name': 'hygrodeik'}, {'id': 8517, 'synset': 'hygrometer.n.01', 'name': 'hygrometer'}, {'id': 8518, 'synset': 'hygroscope.n.01', 'name': 'hygroscope'}, {'id': 8519, 'synset': 'hyperbaric_chamber.n.01', 'name': 'hyperbaric_chamber'}, {'id': 8520, 'synset': 'hypercoaster.n.01', 'name': 'hypercoaster'}, {'id': 8521, 'synset': 'hypermarket.n.01', 'name': 'hypermarket'}, {'id': 8522, 'synset': 'hypodermic_needle.n.01', 'name': 'hypodermic_needle'}, {'id': 8523, 'synset': 'hypodermic_syringe.n.01', 'name': 'hypodermic_syringe'}, {'id': 8524, 'synset': 'hypsometer.n.01', 'name': 'hypsometer'}, {'id': 8525, 'synset': 'hysterosalpingogram.n.01', 'name': 'hysterosalpingogram'}, {'id': 8526, 'synset': 'i-beam.n.01', 'name': 'I-beam'}, {'id': 8527, 'synset': 'ice_ax.n.01', 'name': 'ice_ax'}, {'id': 8528, 'synset': 'iceboat.n.02', 'name': 'iceboat'}, {'id': 8529, 'synset': 'icebreaker.n.01', 'name': 'icebreaker'}, {'id': 8530, 'synset': 
'iced-tea_spoon.n.01', 'name': 'iced-tea_spoon'}, {'id': 8531, 'synset': 'ice_hockey_rink.n.01', 'name': 'ice_hockey_rink'}, {'id': 8532, 'synset': 'ice_machine.n.01', 'name': 'ice_machine'}, {'id': 8533, 'synset': 'icepick.n.01', 'name': 'icepick'}, {'id': 8534, 'synset': 'ice_rink.n.01', 'name': 'ice_rink'}, {'id': 8535, 'synset': 'ice_tongs.n.01', 'name': 'ice_tongs'}, {'id': 8536, 'synset': 'icetray.n.01', 'name': 'icetray'}, {'id': 8537, 'synset': 'iconoscope.n.01', 'name': 'iconoscope'}, {'id': 8538, 'synset': 'identikit.n.01', 'name': 'Identikit'}, {'id': 8539, 'synset': 'idle_pulley.n.01', 'name': 'idle_pulley'}, {'id': 8540, 'synset': 'igloo.n.01', 'name': 'igloo'}, {'id': 8541, 'synset': 'ignition_coil.n.01', 'name': 'ignition_coil'}, {'id': 8542, 'synset': 'ignition_key.n.01', 'name': 'ignition_key'}, {'id': 8543, 'synset': 'ignition_switch.n.01', 'name': 'ignition_switch'}, {'id': 8544, 'synset': 'imaret.n.01', 'name': 'imaret'}, {'id': 8545, 'synset': 'immovable_bandage.n.01', 'name': 'immovable_bandage'}, {'id': 8546, 'synset': 'impact_printer.n.01', 'name': 'impact_printer'}, {'id': 8547, 'synset': 'impeller.n.01', 'name': 'impeller'}, {'id': 8548, 'synset': 'implant.n.01', 'name': 'implant'}, {'id': 8549, 'synset': 'implement.n.01', 'name': 'implement'}, {'id': 8550, 'synset': 'impression.n.07', 'name': 'impression'}, {'id': 8551, 'synset': 'imprint.n.05', 'name': 'imprint'}, {'id': 8552, 'synset': 'improvised_explosive_device.n.01', 'name': 'improvised_explosive_device'}, {'id': 8553, 'synset': 'impulse_turbine.n.01', 'name': 'impulse_turbine'}, {'id': 8554, 'synset': 'in-basket.n.01', 'name': 'in-basket'}, {'id': 8555, 'synset': 'incendiary_bomb.n.01', 'name': 'incendiary_bomb'}, {'id': 8556, 'synset': 'incinerator.n.01', 'name': 'incinerator'}, {'id': 8557, 'synset': 'inclined_plane.n.01', 'name': 'inclined_plane'}, {'id': 8558, 'synset': 'inclinometer.n.02', 'name': 'inclinometer'}, {'id': 8559, 'synset': 'inclinometer.n.01', 'name': 'inclinometer'}, {'id': 8560, 'synset': 'incrustation.n.03', 'name': 'incrustation'}, {'id': 8561, 'synset': 'incubator.n.01', 'name': 'incubator'}, {'id': 8562, 'synset': 'index_register.n.01', 'name': 'index_register'}, {'id': 8563, 'synset': 'indiaman.n.01', 'name': 'Indiaman'}, {'id': 8564, 'synset': 'indian_club.n.01', 'name': 'Indian_club'}, {'id': 8565, 'synset': 'indicator.n.03', 'name': 'indicator'}, {'id': 8566, 'synset': 'induction_coil.n.01', 'name': 'induction_coil'}, {'id': 8567, 'synset': 'inductor.n.01', 'name': 'inductor'}, {'id': 8568, 'synset': 'industrial_watercourse.n.01', 'name': 'industrial_watercourse'}, {'id': 8569, 'synset': 'inertial_guidance_system.n.01', 'name': 'inertial_guidance_system'}, {'id': 8570, 'synset': 'inflater.n.01', 'name': 'inflater'}, {'id': 8571, 'synset': 'injector.n.01', 'name': 'injector'}, {'id': 8572, 'synset': 'ink_bottle.n.01', 'name': 'ink_bottle'}, {'id': 8573, 'synset': 'ink_eraser.n.01', 'name': 'ink_eraser'}, {'id': 8574, 'synset': 'ink-jet_printer.n.01', 'name': 'ink-jet_printer'}, {'id': 8575, 'synset': 'inkle.n.01', 'name': 'inkle'}, {'id': 8576, 'synset': 'inkstand.n.02', 'name': 'inkstand'}, {'id': 8577, 'synset': 'inkwell.n.01', 'name': 'inkwell'}, {'id': 8578, 'synset': 'inlay.n.01', 'name': 'inlay'}, {'id': 8579, 'synset': 'inside_caliper.n.01', 'name': 'inside_caliper'}, {'id': 8580, 'synset': 'insole.n.01', 'name': 'insole'}, {'id': 8581, 'synset': 'instep.n.02', 'name': 'instep'}, {'id': 8582, 'synset': 'instillator.n.01', 'name': 'instillator'}, {'id': 8583, 'synset': 
'institution.n.02', 'name': 'institution'}, {'id': 8584, 'synset': 'instrument.n.01', 'name': 'instrument'}, {'id': 8585, 'synset': 'instrument_of_punishment.n.01', 'name': 'instrument_of_punishment'}, {'id': 8586, 'synset': 'instrument_of_torture.n.01', 'name': 'instrument_of_torture'}, {'id': 8587, 'synset': 'intaglio.n.02', 'name': 'intaglio'}, {'id': 8588, 'synset': 'intake_valve.n.01', 'name': 'intake_valve'}, {'id': 8589, 'synset': 'integrated_circuit.n.01', 'name': 'integrated_circuit'}, {'id': 8590, 'synset': 'integrator.n.01', 'name': 'integrator'}, {'id': 8591, 'synset': 'intelnet.n.01', 'name': 'Intelnet'}, {'id': 8592, 'synset': 'interceptor.n.01', 'name': 'interceptor'}, {'id': 8593, 'synset': 'interchange.n.01', 'name': 'interchange'}, {'id': 8594, 'synset': 'intercommunication_system.n.01', 'name': 'intercommunication_system'}, {'id': 8595, 'synset': 'intercontinental_ballistic_missile.n.01', 'name': 'intercontinental_ballistic_missile'}, {'id': 8596, 'synset': 'interface.n.04', 'name': 'interface'}, {'id': 8597, 'synset': 'interferometer.n.01', 'name': 'interferometer'}, {'id': 8598, 'synset': 'interior_door.n.01', 'name': 'interior_door'}, {'id': 8599, 'synset': 'internal-combustion_engine.n.01', 'name': 'internal-combustion_engine'}, {'id': 8600, 'synset': 'internal_drive.n.01', 'name': 'internal_drive'}, {'id': 8601, 'synset': 'internet.n.01', 'name': 'internet'}, {'id': 8602, 'synset': 'interphone.n.01', 'name': 'interphone'}, {'id': 8603, 'synset': 'interrupter.n.01', 'name': 'interrupter'}, {'id': 8604, 'synset': 'intersection.n.02', 'name': 'intersection'}, {'id': 8605, 'synset': 'interstice.n.02', 'name': 'interstice'}, {'id': 8606, 'synset': 'intraocular_lens.n.01', 'name': 'intraocular_lens'}, {'id': 8607, 'synset': 'intravenous_pyelogram.n.01', 'name': 'intravenous_pyelogram'}, {'id': 8608, 'synset': 'inverter.n.01', 'name': 'inverter'}, {'id': 8609, 'synset': 'ion_engine.n.01', 'name': 'ion_engine'}, {'id': 8610, 'synset': 'ionization_chamber.n.01', 'name': 'ionization_chamber'}, {'id': 8611, 'synset': 'video_ipod.n.01', 'name': 'video_iPod'}, {'id': 8612, 'synset': 'iron.n.02', 'name': 'iron'}, {'id': 8613, 'synset': 'iron.n.03', 'name': 'iron'}, {'id': 8614, 'synset': 'irons.n.01', 'name': 'irons'}, {'id': 8615, 'synset': 'ironclad.n.01', 'name': 'ironclad'}, {'id': 8616, 'synset': 'iron_foundry.n.01', 'name': 'iron_foundry'}, {'id': 8617, 'synset': 'iron_horse.n.01', 'name': 'iron_horse'}, {'id': 8618, 'synset': 'ironing.n.01', 'name': 'ironing'}, {'id': 8619, 'synset': 'iron_lung.n.01', 'name': 'iron_lung'}, {'id': 8620, 'synset': 'ironmongery.n.01', 'name': 'ironmongery'}, {'id': 8621, 'synset': 'ironworks.n.01', 'name': 'ironworks'}, {'id': 8622, 'synset': 'irrigation_ditch.n.01', 'name': 'irrigation_ditch'}, {'id': 8623, 'synset': 'izar.n.01', 'name': 'izar'}, {'id': 8624, 'synset': 'jabot.n.01', 'name': 'jabot'}, {'id': 8625, 'synset': 'jack.n.10', 'name': 'jack'}, {'id': 8626, 'synset': 'jack.n.07', 'name': 'jack'}, {'id': 8627, 'synset': 'jack.n.06', 'name': 'jack'}, {'id': 8628, 'synset': 'jack.n.05', 'name': 'jack'}, {'id': 8629, 'synset': 'jacket.n.02', 'name': 'jacket'}, {'id': 8630, 'synset': 'jacket.n.05', 'name': 'jacket'}, {'id': 8631, 'synset': 'jack-in-the-box.n.01', 'name': 'jack-in-the-box'}, {'id': 8632, 'synset': "jack-o'-lantern.n.02", 'name': "jack-o'-lantern"}, {'id': 8633, 'synset': 'jack_plane.n.01', 'name': 'jack_plane'}, {'id': 8634, 'synset': "jacob's_ladder.n.02", 'name': "Jacob's_ladder"}, {'id': 8635, 'synset': 'jaconet.n.01', 
'name': 'jaconet'}, {'id': 8636, 'synset': 'jacquard_loom.n.01', 'name': 'Jacquard_loom'}, {'id': 8637, 'synset': 'jacquard.n.02', 'name': 'jacquard'}, {'id': 8638, 'synset': 'jag.n.03', 'name': 'jag'}, {'id': 8639, 'synset': 'jail.n.01', 'name': 'jail'}, {'id': 8640, 'synset': 'jalousie.n.02', 'name': 'jalousie'}, {'id': 8641, 'synset': 'jamb.n.01', 'name': 'jamb'}, {'id': 8642, 'synset': 'jammer.n.01', 'name': 'jammer'}, {'id': 8643, 'synset': 'jampot.n.01', 'name': 'jampot'}, {'id': 8644, 'synset': 'japan.n.04', 'name': 'japan'}, {'id': 8645, 'synset': 'jarvik_heart.n.01', 'name': 'Jarvik_heart'}, {'id': 8646, 'synset': 'jaunting_car.n.01', 'name': 'jaunting_car'}, {'id': 8647, 'synset': 'javelin.n.02', 'name': 'javelin'}, {'id': 8648, 'synset': 'jaw.n.03', 'name': 'jaw'}, {'id': 8649, 'synset': 'jaws_of_life.n.01', 'name': 'Jaws_of_Life'}, {'id': 8650, 'synset': 'jellaba.n.01', 'name': 'jellaba'}, {'id': 8651, 'synset': 'jerkin.n.01', 'name': 'jerkin'}, {'id': 8652, 'synset': 'jeroboam.n.02', 'name': 'jeroboam'}, {'id': 8653, 'synset': 'jersey.n.04', 'name': 'jersey'}, {'id': 8654, 'synset': 'jet_bridge.n.01', 'name': 'jet_bridge'}, {'id': 8655, 'synset': 'jet_engine.n.01', 'name': 'jet_engine'}, {'id': 8656, 'synset': 'jetliner.n.01', 'name': 'jetliner'}, {'id': 8657, 'synset': "jeweler's_glass.n.01", 'name': "jeweler's_glass"}, {'id': 8658, 'synset': 'jewelled_headdress.n.01', 'name': 'jewelled_headdress'}, {'id': 8659, 'synset': "jew's_harp.n.01", 'name': "jew's_harp"}, {'id': 8660, 'synset': 'jib.n.01', 'name': 'jib'}, {'id': 8661, 'synset': 'jibboom.n.01', 'name': 'jibboom'}, {'id': 8662, 'synset': 'jig.n.03', 'name': 'jig'}, {'id': 8663, 'synset': 'jig.n.02', 'name': 'jig'}, {'id': 8664, 'synset': 'jiggermast.n.01', 'name': 'jiggermast'}, {'id': 8665, 'synset': 'jigsaw.n.02', 'name': 'jigsaw'}, {'id': 8666, 'synset': 'jigsaw_puzzle.n.01', 'name': 'jigsaw_puzzle'}, {'id': 8667, 'synset': 'jinrikisha.n.01', 'name': 'jinrikisha'}, {'id': 8668, 'synset': 'jobcentre.n.01', 'name': 'jobcentre'}, {'id': 8669, 'synset': 'jodhpurs.n.01', 'name': 'jodhpurs'}, {'id': 8670, 'synset': 'jodhpur.n.01', 'name': 'jodhpur'}, {'id': 8671, 'synset': 'joinery.n.01', 'name': 'joinery'}, {'id': 8672, 'synset': 'joint.n.05', 'name': 'joint'}, {'id': 8673, 'synset': 'joint_direct_attack_munition.n.01', 'name': 'Joint_Direct_Attack_Munition'}, {'id': 8674, 'synset': 'jointer.n.01', 'name': 'jointer'}, {'id': 8675, 'synset': 'joist.n.01', 'name': 'joist'}, {'id': 8676, 'synset': 'jolly_boat.n.01', 'name': 'jolly_boat'}, {'id': 8677, 'synset': 'jorum.n.01', 'name': 'jorum'}, {'id': 8678, 'synset': 'joss_house.n.01', 'name': 'joss_house'}, {'id': 8679, 'synset': 'journal_bearing.n.01', 'name': 'journal_bearing'}, {'id': 8680, 'synset': 'journal_box.n.01', 'name': 'journal_box'}, {'id': 8681, 'synset': 'jungle_gym.n.01', 'name': 'jungle_gym'}, {'id': 8682, 'synset': 'junk.n.02', 'name': 'junk'}, {'id': 8683, 'synset': 'jug.n.01', 'name': 'jug'}, {'id': 8684, 'synset': 'jukebox.n.01', 'name': 'jukebox'}, {'id': 8685, 'synset': 'jumbojet.n.01', 'name': 'jumbojet'}, {'id': 8686, 'synset': 'jumper.n.07', 'name': 'jumper'}, {'id': 8687, 'synset': 'jumper.n.06', 'name': 'jumper'}, {'id': 8688, 'synset': 'jumper.n.05', 'name': 'jumper'}, {'id': 8689, 'synset': 'jumper.n.04', 'name': 'jumper'}, {'id': 8690, 'synset': 'jumper_cable.n.01', 'name': 'jumper_cable'}, {'id': 8691, 'synset': 'jump_seat.n.01', 'name': 'jump_seat'}, {'id': 8692, 'synset': 'jump_suit.n.02', 'name': 'jump_suit'}, {'id': 8693, 'synset': 
'junction.n.01', 'name': 'junction'}, {'id': 8694, 'synset': 'junction.n.04', 'name': 'junction'}, {'id': 8695, 'synset': 'junction_barrier.n.01', 'name': 'junction_barrier'}, {'id': 8696, 'synset': 'junk_shop.n.01', 'name': 'junk_shop'}, {'id': 8697, 'synset': 'jury_box.n.01', 'name': 'jury_box'}, {'id': 8698, 'synset': 'jury_mast.n.01', 'name': 'jury_mast'}, {'id': 8699, 'synset': 'kachina.n.03', 'name': 'kachina'}, {'id': 8700, 'synset': 'kaffiyeh.n.01', 'name': 'kaffiyeh'}, {'id': 8701, 'synset': 'kalansuwa.n.01', 'name': 'kalansuwa'}, {'id': 8702, 'synset': 'kalashnikov.n.01', 'name': 'Kalashnikov'}, {'id': 8703, 'synset': 'kameez.n.01', 'name': 'kameez'}, {'id': 8704, 'synset': 'kanzu.n.01', 'name': 'kanzu'}, {'id': 8705, 'synset': 'katharometer.n.01', 'name': 'katharometer'}, {'id': 8706, 'synset': 'kazoo.n.01', 'name': 'kazoo'}, {'id': 8707, 'synset': 'keel.n.03', 'name': 'keel'}, {'id': 8708, 'synset': 'keelboat.n.01', 'name': 'keelboat'}, {'id': 8709, 'synset': 'keelson.n.01', 'name': 'keelson'}, {'id': 8710, 'synset': 'keep.n.02', 'name': 'keep'}, {'id': 8711, 'synset': 'kepi.n.01', 'name': 'kepi'}, {'id': 8712, 'synset': 'keratoscope.n.01', 'name': 'keratoscope'}, {'id': 8713, 'synset': 'kerchief.n.01', 'name': 'kerchief'}, {'id': 8714, 'synset': 'ketch.n.01', 'name': 'ketch'}, {'id': 8715, 'synset': 'kettle.n.04', 'name': 'kettle'}, {'id': 8716, 'synset': 'key.n.15', 'name': 'key'}, {'id': 8717, 'synset': 'keyboard.n.01', 'name': 'keyboard'}, {'id': 8718, 'synset': 'keyboard_buffer.n.01', 'name': 'keyboard_buffer'}, {'id': 8719, 'synset': 'keyboard_instrument.n.01', 'name': 'keyboard_instrument'}, {'id': 8720, 'synset': 'keyhole.n.01', 'name': 'keyhole'}, {'id': 8721, 'synset': 'keyhole_saw.n.01', 'name': 'keyhole_saw'}, {'id': 8722, 'synset': 'khadi.n.01', 'name': 'khadi'}, {'id': 8723, 'synset': 'khaki.n.01', 'name': 'khaki'}, {'id': 8724, 'synset': 'khakis.n.01', 'name': 'khakis'}, {'id': 8725, 'synset': 'khimar.n.01', 'name': 'khimar'}, {'id': 8726, 'synset': 'khukuri.n.01', 'name': 'khukuri'}, {'id': 8727, 'synset': 'kick_pleat.n.01', 'name': 'kick_pleat'}, {'id': 8728, 'synset': 'kicksorter.n.01', 'name': 'kicksorter'}, {'id': 8729, 'synset': 'kickstand.n.01', 'name': 'kickstand'}, {'id': 8730, 'synset': 'kick_starter.n.01', 'name': 'kick_starter'}, {'id': 8731, 'synset': 'kid_glove.n.01', 'name': 'kid_glove'}, {'id': 8732, 'synset': 'kiln.n.01', 'name': 'kiln'}, {'id': 8733, 'synset': 'kinescope.n.01', 'name': 'kinescope'}, {'id': 8734, 'synset': 'kinetoscope.n.01', 'name': 'Kinetoscope'}, {'id': 8735, 'synset': 'king.n.10', 'name': 'king'}, {'id': 8736, 'synset': 'king.n.08', 'name': 'king'}, {'id': 8737, 'synset': 'kingbolt.n.01', 'name': 'kingbolt'}, {'id': 8738, 'synset': 'king_post.n.01', 'name': 'king_post'}, {'id': 8739, 'synset': "kipp's_apparatus.n.01", 'name': "Kipp's_apparatus"}, {'id': 8740, 'synset': 'kirk.n.01', 'name': 'kirk'}, {'id': 8741, 'synset': 'kirpan.n.01', 'name': 'kirpan'}, {'id': 8742, 'synset': 'kirtle.n.02', 'name': 'kirtle'}, {'id': 8743, 'synset': 'kirtle.n.01', 'name': 'kirtle'}, {'id': 8744, 'synset': 'kit.n.02', 'name': 'kit'}, {'id': 8745, 'synset': 'kit.n.01', 'name': 'kit'}, {'id': 8746, 'synset': 'kitbag.n.01', 'name': 'kitbag'}, {'id': 8747, 'synset': 'kitchen.n.01', 'name': 'kitchen'}, {'id': 8748, 'synset': 'kitchen_appliance.n.01', 'name': 'kitchen_appliance'}, {'id': 8749, 'synset': 'kitchenette.n.01', 'name': 'kitchenette'}, {'id': 8750, 'synset': 'kitchen_utensil.n.01', 'name': 'kitchen_utensil'}, {'id': 8751, 'synset': 
'kitchenware.n.01', 'name': 'kitchenware'}, {'id': 8752, 'synset': 'kite_balloon.n.01', 'name': 'kite_balloon'}, {'id': 8753, 'synset': 'klaxon.n.01', 'name': 'klaxon'}, {'id': 8754, 'synset': 'klieg_light.n.01', 'name': 'klieg_light'}, {'id': 8755, 'synset': 'klystron.n.01', 'name': 'klystron'}, {'id': 8756, 'synset': 'knee_brace.n.01', 'name': 'knee_brace'}, {'id': 8757, 'synset': 'knee-high.n.01', 'name': 'knee-high'}, {'id': 8758, 'synset': 'knee_piece.n.01', 'name': 'knee_piece'}, {'id': 8759, 'synset': 'knife.n.02', 'name': 'knife'}, {'id': 8760, 'synset': 'knife_blade.n.01', 'name': 'knife_blade'}, {'id': 8761, 'synset': 'knight.n.02', 'name': 'knight'}, {'id': 8762, 'synset': 'knit.n.01', 'name': 'knit'}, {'id': 8763, 'synset': 'knitting_machine.n.01', 'name': 'knitting_machine'}, {'id': 8764, 'synset': 'knitwear.n.01', 'name': 'knitwear'}, {'id': 8765, 'synset': 'knob.n.01', 'name': 'knob'}, {'id': 8766, 'synset': 'knob.n.04', 'name': 'knob'}, {'id': 8767, 'synset': 'knobble.n.01', 'name': 'knobble'}, {'id': 8768, 'synset': 'knobkerrie.n.01', 'name': 'knobkerrie'}, {'id': 8769, 'synset': 'knot.n.02', 'name': 'knot'}, {'id': 8770, 'synset': 'knuckle_joint.n.02', 'name': 'knuckle_joint'}, {'id': 8771, 'synset': 'kohl.n.01', 'name': 'kohl'}, {'id': 8772, 'synset': 'koto.n.01', 'name': 'koto'}, {'id': 8773, 'synset': 'kraal.n.02', 'name': 'kraal'}, {'id': 8774, 'synset': 'kremlin.n.02', 'name': 'kremlin'}, {'id': 8775, 'synset': 'kris.n.01', 'name': 'kris'}, {'id': 8776, 'synset': 'krummhorn.n.01', 'name': 'krummhorn'}, {'id': 8777, 'synset': "kundt's_tube.n.01", 'name': "Kundt's_tube"}, {'id': 8778, 'synset': 'kurdistan.n.02', 'name': 'Kurdistan'}, {'id': 8779, 'synset': 'kurta.n.01', 'name': 'kurta'}, {'id': 8780, 'synset': 'kylix.n.01', 'name': 'kylix'}, {'id': 8781, 'synset': 'kymograph.n.01', 'name': 'kymograph'}, {'id': 8782, 'synset': 'lab_bench.n.01', 'name': 'lab_bench'}, {'id': 8783, 'synset': 'lace.n.02', 'name': 'lace'}, {'id': 8784, 'synset': 'lacquer.n.02', 'name': 'lacquer'}, {'id': 8785, 'synset': 'lacquerware.n.01', 'name': 'lacquerware'}, {'id': 8786, 'synset': 'lacrosse_ball.n.01', 'name': 'lacrosse_ball'}, {'id': 8787, 'synset': 'ladder-back.n.02', 'name': 'ladder-back'}, {'id': 8788, 'synset': 'ladder-back.n.01', 'name': 'ladder-back'}, {'id': 8789, 'synset': 'ladder_truck.n.01', 'name': 'ladder_truck'}, {'id': 8790, 'synset': "ladies'_room.n.01", 'name': "ladies'_room"}, {'id': 8791, 'synset': 'lady_chapel.n.01', 'name': 'lady_chapel'}, {'id': 8792, 'synset': 'lagerphone.n.01', 'name': 'lagerphone'}, {'id': 8793, 'synset': 'lag_screw.n.01', 'name': 'lag_screw'}, {'id': 8794, 'synset': 'lake_dwelling.n.01', 'name': 'lake_dwelling'}, {'id': 8795, 'synset': 'lally.n.01', 'name': 'lally'}, {'id': 8796, 'synset': 'lamasery.n.01', 'name': 'lamasery'}, {'id': 8797, 'synset': 'lambrequin.n.02', 'name': 'lambrequin'}, {'id': 8798, 'synset': 'lame.n.02', 'name': 'lame'}, {'id': 8799, 'synset': 'laminar_flow_clean_room.n.01', 'name': 'laminar_flow_clean_room'}, {'id': 8800, 'synset': 'laminate.n.01', 'name': 'laminate'}, {'id': 8801, 'synset': 'lamination.n.01', 'name': 'lamination'}, {'id': 8802, 'synset': 'lamp.n.01', 'name': 'lamp'}, {'id': 8803, 'synset': 'lamp_house.n.01', 'name': 'lamp_house'}, {'id': 8804, 'synset': 'lanai.n.02', 'name': 'lanai'}, {'id': 8805, 'synset': 'lancet_arch.n.01', 'name': 'lancet_arch'}, {'id': 8806, 'synset': 'lancet_window.n.01', 'name': 'lancet_window'}, {'id': 8807, 'synset': 'landau.n.02', 'name': 'landau'}, {'id': 8808, 'synset': 
'lander.n.02', 'name': 'lander'}, {'id': 8809, 'synset': 'landing_craft.n.01', 'name': 'landing_craft'}, {'id': 8810, 'synset': 'landing_flap.n.01', 'name': 'landing_flap'}, {'id': 8811, 'synset': 'landing_gear.n.01', 'name': 'landing_gear'}, {'id': 8812, 'synset': 'landing_net.n.01', 'name': 'landing_net'}, {'id': 8813, 'synset': 'landing_skid.n.01', 'name': 'landing_skid'}, {'id': 8814, 'synset': 'land_line.n.01', 'name': 'land_line'}, {'id': 8815, 'synset': 'land_mine.n.01', 'name': 'land_mine'}, {'id': 8816, 'synset': 'land_office.n.01', 'name': 'land_office'}, {'id': 8817, 'synset': 'lanolin.n.02', 'name': 'lanolin'}, {'id': 8818, 'synset': 'lanyard.n.01', 'name': 'lanyard'}, {'id': 8819, 'synset': 'lap.n.03', 'name': 'lap'}, {'id': 8820, 'synset': 'laparoscope.n.01', 'name': 'laparoscope'}, {'id': 8821, 'synset': 'lapboard.n.01', 'name': 'lapboard'}, {'id': 8822, 'synset': 'lapel.n.01', 'name': 'lapel'}, {'id': 8823, 'synset': 'lap_joint.n.01', 'name': 'lap_joint'}, {'id': 8824, 'synset': 'laryngoscope.n.01', 'name': 'laryngoscope'}, {'id': 8825, 'synset': 'laser.n.01', 'name': 'laser'}, {'id': 8826, 'synset': 'laser-guided_bomb.n.01', 'name': 'laser-guided_bomb'}, {'id': 8827, 'synset': 'laser_printer.n.01', 'name': 'laser_printer'}, {'id': 8828, 'synset': 'lash.n.02', 'name': 'lash'}, {'id': 8829, 'synset': 'lashing.n.02', 'name': 'lashing'}, {'id': 8830, 'synset': 'lasso.n.02', 'name': 'lasso'}, {'id': 8831, 'synset': 'latch.n.01', 'name': 'latch'}, {'id': 8832, 'synset': 'latchet.n.01', 'name': 'latchet'}, {'id': 8833, 'synset': 'latchkey.n.01', 'name': 'latchkey'}, {'id': 8834, 'synset': 'lateen.n.01', 'name': 'lateen'}, {'id': 8835, 'synset': 'latex_paint.n.01', 'name': 'latex_paint'}, {'id': 8836, 'synset': 'lath.n.01', 'name': 'lath'}, {'id': 8837, 'synset': 'lathe.n.01', 'name': 'lathe'}, {'id': 8838, 'synset': 'latrine.n.01', 'name': 'latrine'}, {'id': 8839, 'synset': 'lattice.n.03', 'name': 'lattice'}, {'id': 8840, 'synset': 'launch.n.01', 'name': 'launch'}, {'id': 8841, 'synset': 'launcher.n.01', 'name': 'launcher'}, {'id': 8842, 'synset': 'laundry.n.01', 'name': 'laundry'}, {'id': 8843, 'synset': 'laundry_cart.n.01', 'name': 'laundry_cart'}, {'id': 8844, 'synset': 'laundry_truck.n.01', 'name': 'laundry_truck'}, {'id': 8845, 'synset': 'lavalava.n.01', 'name': 'lavalava'}, {'id': 8846, 'synset': 'lavaliere.n.01', 'name': 'lavaliere'}, {'id': 8847, 'synset': 'laver.n.02', 'name': 'laver'}, {'id': 8848, 'synset': 'lawn_chair.n.01', 'name': 'lawn_chair'}, {'id': 8849, 'synset': 'lawn_furniture.n.01', 'name': 'lawn_furniture'}, {'id': 8850, 'synset': 'layette.n.01', 'name': 'layette'}, {'id': 8851, 'synset': 'lead-acid_battery.n.01', 'name': 'lead-acid_battery'}, {'id': 8852, 'synset': 'lead-in.n.02', 'name': 'lead-in'}, {'id': 8853, 'synset': 'leading_rein.n.01', 'name': 'leading_rein'}, {'id': 8854, 'synset': 'lead_pencil.n.01', 'name': 'lead_pencil'}, {'id': 8855, 'synset': 'leaf_spring.n.01', 'name': 'leaf_spring'}, {'id': 8856, 'synset': 'lean-to.n.01', 'name': 'lean-to'}, {'id': 8857, 'synset': 'lean-to_tent.n.01', 'name': 'lean-to_tent'}, {'id': 8858, 'synset': 'leash.n.01', 'name': 'leash'}, {'id': 8859, 'synset': 'leatherette.n.01', 'name': 'leatherette'}, {'id': 8860, 'synset': 'leather_strip.n.01', 'name': 'leather_strip'}, {'id': 8861, 'synset': 'leclanche_cell.n.01', 'name': 'Leclanche_cell'}, {'id': 8862, 'synset': 'lectern.n.01', 'name': 'lectern'}, {'id': 8863, 'synset': 'lecture_room.n.01', 'name': 'lecture_room'}, {'id': 8864, 'synset': 'lederhosen.n.01', 
'name': 'lederhosen'}, {'id': 8865, 'synset': 'ledger_board.n.01', 'name': 'ledger_board'}, {'id': 8866, 'synset': 'leg.n.07', 'name': 'leg'}, {'id': 8867, 'synset': 'leg.n.03', 'name': 'leg'}, {'id': 8868, 'synset': 'leiden_jar.n.01', 'name': 'Leiden_jar'}, {'id': 8869, 'synset': 'leisure_wear.n.01', 'name': 'leisure_wear'}, {'id': 8870, 'synset': 'lens.n.01', 'name': 'lens'}, {'id': 8871, 'synset': 'lens.n.05', 'name': 'lens'}, {'id': 8872, 'synset': 'lens_cap.n.01', 'name': 'lens_cap'}, {'id': 8873, 'synset': 'lens_implant.n.01', 'name': 'lens_implant'}, {'id': 8874, 'synset': 'leotard.n.01', 'name': 'leotard'}, {'id': 8875, 'synset': 'letter_case.n.01', 'name': 'letter_case'}, {'id': 8876, 'synset': 'letter_opener.n.01', 'name': 'letter_opener'}, {'id': 8877, 'synset': 'levee.n.03', 'name': 'levee'}, {'id': 8878, 'synset': 'level.n.05', 'name': 'level'}, {'id': 8879, 'synset': 'lever.n.01', 'name': 'lever'}, {'id': 8880, 'synset': 'lever.n.03', 'name': 'lever'}, {'id': 8881, 'synset': 'lever.n.02', 'name': 'lever'}, {'id': 8882, 'synset': 'lever_lock.n.01', 'name': 'lever_lock'}, {'id': 8883, 'synset': "levi's.n.01", 'name': "Levi's"}, {'id': 8884, 'synset': 'liberty_ship.n.01', 'name': 'Liberty_ship'}, {'id': 8885, 'synset': 'library.n.01', 'name': 'library'}, {'id': 8886, 'synset': 'library.n.05', 'name': 'library'}, {'id': 8887, 'synset': 'lid.n.02', 'name': 'lid'}, {'id': 8888, 'synset': 'liebig_condenser.n.01', 'name': 'Liebig_condenser'}, {'id': 8889, 'synset': 'lie_detector.n.01', 'name': 'lie_detector'}, {'id': 8890, 'synset': 'lifeboat.n.01', 'name': 'lifeboat'}, {'id': 8891, 'synset': 'life_office.n.01', 'name': 'life_office'}, {'id': 8892, 'synset': 'life_preserver.n.01', 'name': 'life_preserver'}, {'id': 8893, 'synset': 'life-support_system.n.02', 'name': 'life-support_system'}, {'id': 8894, 'synset': 'life-support_system.n.01', 'name': 'life-support_system'}, {'id': 8895, 'synset': 'lifting_device.n.01', 'name': 'lifting_device'}, {'id': 8896, 'synset': 'lift_pump.n.01', 'name': 'lift_pump'}, {'id': 8897, 'synset': 'ligament.n.02', 'name': 'ligament'}, {'id': 8898, 'synset': 'ligature.n.03', 'name': 'ligature'}, {'id': 8899, 'synset': 'light.n.02', 'name': 'light'}, {'id': 8900, 'synset': 'light_arm.n.01', 'name': 'light_arm'}, {'id': 8901, 'synset': 'light_circuit.n.01', 'name': 'light_circuit'}, {'id': 8902, 'synset': 'light-emitting_diode.n.01', 'name': 'light-emitting_diode'}, {'id': 8903, 'synset': 'lighter.n.02', 'name': 'lighter'}, {'id': 8904, 'synset': 'lighter-than-air_craft.n.01', 'name': 'lighter-than-air_craft'}, {'id': 8905, 'synset': 'light_filter.n.01', 'name': 'light_filter'}, {'id': 8906, 'synset': 'lighting.n.02', 'name': 'lighting'}, {'id': 8907, 'synset': 'light_machine_gun.n.01', 'name': 'light_machine_gun'}, {'id': 8908, 'synset': 'light_meter.n.01', 'name': 'light_meter'}, {'id': 8909, 'synset': 'light_microscope.n.01', 'name': 'light_microscope'}, {'id': 8910, 'synset': 'light_pen.n.01', 'name': 'light_pen'}, {'id': 8911, 'synset': 'lightship.n.01', 'name': 'lightship'}, {'id': 8912, 'synset': 'lilo.n.01', 'name': 'Lilo'}, {'id': 8913, 'synset': 'limber.n.01', 'name': 'limber'}, {'id': 8914, 'synset': 'limekiln.n.01', 'name': 'limekiln'}, {'id': 8915, 'synset': 'limiter.n.01', 'name': 'limiter'}, {'id': 8916, 'synset': 'linear_accelerator.n.01', 'name': 'linear_accelerator'}, {'id': 8917, 'synset': 'linen.n.01', 'name': 'linen'}, {'id': 8918, 'synset': 'line_printer.n.01', 'name': 'line_printer'}, {'id': 8919, 'synset': 'liner.n.04', 'name': 
'liner'}, {'id': 8920, 'synset': 'liner.n.03', 'name': 'liner'}, {'id': 8921, 'synset': 'lingerie.n.01', 'name': 'lingerie'}, {'id': 8922, 'synset': 'lining.n.01', 'name': 'lining'}, {'id': 8923, 'synset': 'link.n.09', 'name': 'link'}, {'id': 8924, 'synset': 'linkage.n.03', 'name': 'linkage'}, {'id': 8925, 'synset': 'link_trainer.n.01', 'name': 'Link_trainer'}, {'id': 8926, 'synset': 'linocut.n.02', 'name': 'linocut'}, {'id': 8927, 'synset': 'linoleum_knife.n.01', 'name': 'linoleum_knife'}, {'id': 8928, 'synset': 'linotype.n.01', 'name': 'Linotype'}, {'id': 8929, 'synset': 'linsey-woolsey.n.01', 'name': 'linsey-woolsey'}, {'id': 8930, 'synset': 'linstock.n.01', 'name': 'linstock'}, {'id': 8931, 'synset': 'lion-jaw_forceps.n.01', 'name': 'lion-jaw_forceps'}, {'id': 8932, 'synset': 'lip-gloss.n.01', 'name': 'lip-gloss'}, {'id': 8933, 'synset': 'lipstick.n.01', 'name': 'lipstick'}, {'id': 8934, 'synset': 'liqueur_glass.n.01', 'name': 'liqueur_glass'}, {'id': 8935, 'synset': 'liquid_crystal_display.n.01', 'name': 'liquid_crystal_display'}, {'id': 8936, 'synset': 'liquid_metal_reactor.n.01', 'name': 'liquid_metal_reactor'}, {'id': 8937, 'synset': 'lisle.n.01', 'name': 'lisle'}, {'id': 8938, 'synset': 'lister.n.03', 'name': 'lister'}, {'id': 8939, 'synset': 'litterbin.n.01', 'name': 'litterbin'}, {'id': 8940, 'synset': 'little_theater.n.01', 'name': 'little_theater'}, {'id': 8941, 'synset': 'live_axle.n.01', 'name': 'live_axle'}, {'id': 8942, 'synset': 'living_quarters.n.01', 'name': 'living_quarters'}, {'id': 8943, 'synset': 'living_room.n.01', 'name': 'living_room'}, {'id': 8944, 'synset': 'load.n.09', 'name': 'load'}, {'id': 8945, 'synset': 'loafer.n.02', 'name': 'Loafer'}, {'id': 8946, 'synset': 'loaner.n.02', 'name': 'loaner'}, {'id': 8947, 'synset': 'lobe.n.04', 'name': 'lobe'}, {'id': 8948, 'synset': 'lobster_pot.n.01', 'name': 'lobster_pot'}, {'id': 8949, 'synset': 'local.n.01', 'name': 'local'}, {'id': 8950, 'synset': 'local_area_network.n.01', 'name': 'local_area_network'}, {'id': 8951, 'synset': 'local_oscillator.n.01', 'name': 'local_oscillator'}, {'id': 8952, 'synset': 'lochaber_ax.n.01', 'name': 'Lochaber_ax'}, {'id': 8953, 'synset': 'lock.n.01', 'name': 'lock'}, {'id': 8954, 'synset': 'lock.n.05', 'name': 'lock'}, {'id': 8955, 'synset': 'lock.n.04', 'name': 'lock'}, {'id': 8956, 'synset': 'lock.n.03', 'name': 'lock'}, {'id': 8957, 'synset': 'lockage.n.02', 'name': 'lockage'}, {'id': 8958, 'synset': 'locker.n.02', 'name': 'locker'}, {'id': 8959, 'synset': 'locker_room.n.01', 'name': 'locker_room'}, {'id': 8960, 'synset': 'locket.n.01', 'name': 'locket'}, {'id': 8961, 'synset': 'lock-gate.n.01', 'name': 'lock-gate'}, {'id': 8962, 'synset': 'locking_pliers.n.01', 'name': 'locking_pliers'}, {'id': 8963, 'synset': 'lockring.n.01', 'name': 'lockring'}, {'id': 8964, 'synset': 'lockstitch.n.01', 'name': 'lockstitch'}, {'id': 8965, 'synset': 'lockup.n.01', 'name': 'lockup'}, {'id': 8966, 'synset': 'locomotive.n.01', 'name': 'locomotive'}, {'id': 8967, 'synset': 'lodge.n.05', 'name': 'lodge'}, {'id': 8968, 'synset': 'lodge.n.04', 'name': 'lodge'}, {'id': 8969, 'synset': 'lodge.n.03', 'name': 'lodge'}, {'id': 8970, 'synset': 'lodging_house.n.01', 'name': 'lodging_house'}, {'id': 8971, 'synset': 'loft.n.02', 'name': 'loft'}, {'id': 8972, 'synset': 'loft.n.04', 'name': 'loft'}, {'id': 8973, 'synset': 'loft.n.01', 'name': 'loft'}, {'id': 8974, 'synset': 'log_cabin.n.01', 'name': 'log_cabin'}, {'id': 8975, 'synset': 'loggia.n.01', 'name': 'loggia'}, {'id': 8976, 'synset': 'longbow.n.01', 'name': 
'longbow'}, {'id': 8977, 'synset': 'long_iron.n.01', 'name': 'long_iron'}, {'id': 8978, 'synset': 'long_johns.n.01', 'name': 'long_johns'}, {'id': 8979, 'synset': 'long_sleeve.n.01', 'name': 'long_sleeve'}, {'id': 8980, 'synset': 'long_tom.n.01', 'name': 'long_tom'}, {'id': 8981, 'synset': 'long_trousers.n.01', 'name': 'long_trousers'}, {'id': 8982, 'synset': 'long_underwear.n.01', 'name': 'long_underwear'}, {'id': 8983, 'synset': 'looking_glass.n.01', 'name': 'looking_glass'}, {'id': 8984, 'synset': 'lookout.n.03', 'name': 'lookout'}, {'id': 8985, 'synset': 'loom.n.01', 'name': 'loom'}, {'id': 8986, 'synset': 'loop_knot.n.01', 'name': 'loop_knot'}, {'id': 8987, 'synset': 'lorgnette.n.01', 'name': 'lorgnette'}, {'id': 8988, 'synset': 'lorraine_cross.n.01', 'name': 'Lorraine_cross'}, {'id': 8989, 'synset': 'lorry.n.02', 'name': 'lorry'}, {'id': 8990, 'synset': 'lota.n.01', 'name': 'lota'}, {'id': 8991, 'synset': 'lotion.n.01', 'name': 'lotion'}, {'id': 8992, 'synset': 'lounge.n.02', 'name': 'lounge'}, {'id': 8993, 'synset': 'lounger.n.03', 'name': 'lounger'}, {'id': 8994, 'synset': 'lounging_jacket.n.01', 'name': 'lounging_jacket'}, {'id': 8995, 'synset': 'lounging_pajama.n.01', 'name': 'lounging_pajama'}, {'id': 8996, 'synset': 'loungewear.n.01', 'name': 'loungewear'}, {'id': 8997, 'synset': 'loupe.n.01', 'name': 'loupe'}, {'id': 8998, 'synset': 'louvered_window.n.01', 'name': 'louvered_window'}, {'id': 8999, 'synset': 'love_knot.n.01', 'name': 'love_knot'}, {'id': 9000, 'synset': 'loving_cup.n.01', 'name': 'loving_cup'}, {'id': 9001, 'synset': 'lowboy.n.01', 'name': 'lowboy'}, {'id': 9002, 'synset': 'low-pass_filter.n.01', 'name': 'low-pass_filter'}, {'id': 9003, 'synset': 'low-warp-loom.n.01', 'name': 'low-warp-loom'}, {'id': 9004, 'synset': 'lp.n.01', 'name': 'LP'}, {'id': 9005, 'synset': 'l-plate.n.01', 'name': 'L-plate'}, {'id': 9006, 'synset': "lubber's_hole.n.01", 'name': "lubber's_hole"}, {'id': 9007, 'synset': 'lubricating_system.n.01', 'name': 'lubricating_system'}, {'id': 9008, 'synset': 'luff.n.01', 'name': 'luff'}, {'id': 9009, 'synset': 'lug.n.03', 'name': 'lug'}, {'id': 9010, 'synset': 'luge.n.01', 'name': 'luge'}, {'id': 9011, 'synset': 'luger.n.01', 'name': 'Luger'}, {'id': 9012, 'synset': 'luggage_carrier.n.01', 'name': 'luggage_carrier'}, {'id': 9013, 'synset': 'luggage_compartment.n.01', 'name': 'luggage_compartment'}, {'id': 9014, 'synset': 'luggage_rack.n.01', 'name': 'luggage_rack'}, {'id': 9015, 'synset': 'lugger.n.01', 'name': 'lugger'}, {'id': 9016, 'synset': 'lugsail.n.01', 'name': 'lugsail'}, {'id': 9017, 'synset': 'lug_wrench.n.01', 'name': 'lug_wrench'}, {'id': 9018, 'synset': 'lumberjack.n.02', 'name': 'lumberjack'}, {'id': 9019, 'synset': 'lumbermill.n.01', 'name': 'lumbermill'}, {'id': 9020, 'synset': 'lunar_excursion_module.n.01', 'name': 'lunar_excursion_module'}, {'id': 9021, 'synset': 'lunchroom.n.01', 'name': 'lunchroom'}, {'id': 9022, 'synset': 'lunette.n.01', 'name': 'lunette'}, {'id': 9023, 'synset': 'lungi.n.01', 'name': 'lungi'}, {'id': 9024, 'synset': 'lunula.n.02', 'name': 'lunula'}, {'id': 9025, 'synset': 'lusterware.n.01', 'name': 'lusterware'}, {'id': 9026, 'synset': 'lute.n.02', 'name': 'lute'}, {'id': 9027, 'synset': 'luxury_liner.n.01', 'name': 'luxury_liner'}, {'id': 9028, 'synset': 'lyceum.n.02', 'name': 'lyceum'}, {'id': 9029, 'synset': 'lychgate.n.01', 'name': 'lychgate'}, {'id': 9030, 'synset': 'lyre.n.01', 'name': 'lyre'}, {'id': 9031, 'synset': 'machete.n.01', 'name': 'machete'}, {'id': 9032, 'synset': 'machicolation.n.01', 'name': 
'machicolation'}, {'id': 9033, 'synset': 'machine.n.01', 'name': 'machine'}, {'id': 9034, 'synset': 'machine.n.04', 'name': 'machine'}, {'id': 9035, 'synset': 'machine_bolt.n.01', 'name': 'machine_bolt'}, {'id': 9036, 'synset': 'machinery.n.01', 'name': 'machinery'}, {'id': 9037, 'synset': 'machine_screw.n.01', 'name': 'machine_screw'}, {'id': 9038, 'synset': 'machine_tool.n.01', 'name': 'machine_tool'}, {'id': 9039, 'synset': "machinist's_vise.n.01", 'name': "machinist's_vise"}, {'id': 9040, 'synset': 'machmeter.n.01', 'name': 'machmeter'}, {'id': 9041, 'synset': 'mackinaw.n.04', 'name': 'mackinaw'}, {'id': 9042, 'synset': 'mackinaw.n.03', 'name': 'mackinaw'}, {'id': 9043, 'synset': 'mackinaw.n.01', 'name': 'mackinaw'}, {'id': 9044, 'synset': 'mackintosh.n.01', 'name': 'mackintosh'}, {'id': 9045, 'synset': 'macrame.n.01', 'name': 'macrame'}, {'id': 9046, 'synset': 'madras.n.03', 'name': 'madras'}, {'id': 9047, 'synset': 'mae_west.n.02', 'name': 'Mae_West'}, {'id': 9048, 'synset': 'magazine_rack.n.01', 'name': 'magazine_rack'}, {'id': 9049, 'synset': 'magic_lantern.n.01', 'name': 'magic_lantern'}, {'id': 9050, 'synset': 'magnetic_bottle.n.01', 'name': 'magnetic_bottle'}, {'id': 9051, 'synset': 'magnetic_compass.n.01', 'name': 'magnetic_compass'}, {'id': 9052, 'synset': 'magnetic_core_memory.n.01', 'name': 'magnetic_core_memory'}, {'id': 9053, 'synset': 'magnetic_disk.n.01', 'name': 'magnetic_disk'}, {'id': 9054, 'synset': 'magnetic_head.n.01', 'name': 'magnetic_head'}, {'id': 9055, 'synset': 'magnetic_mine.n.01', 'name': 'magnetic_mine'}, {'id': 9056, 'synset': 'magnetic_needle.n.01', 'name': 'magnetic_needle'}, {'id': 9057, 'synset': 'magnetic_recorder.n.01', 'name': 'magnetic_recorder'}, {'id': 9058, 'synset': 'magnetic_stripe.n.01', 'name': 'magnetic_stripe'}, {'id': 9059, 'synset': 'magnetic_tape.n.01', 'name': 'magnetic_tape'}, {'id': 9060, 'synset': 'magneto.n.01', 'name': 'magneto'}, {'id': 9061, 'synset': 'magnetometer.n.01', 'name': 'magnetometer'}, {'id': 9062, 'synset': 'magnetron.n.01', 'name': 'magnetron'}, {'id': 9063, 'synset': 'magnifier.n.01', 'name': 'magnifier'}, {'id': 9064, 'synset': 'magnum.n.01', 'name': 'magnum'}, {'id': 9065, 'synset': 'magnus_hitch.n.01', 'name': 'magnus_hitch'}, {'id': 9066, 'synset': 'mail.n.03', 'name': 'mail'}, {'id': 9067, 'synset': 'mailbag.n.02', 'name': 'mailbag'}, {'id': 9068, 'synset': 'mailbag.n.01', 'name': 'mailbag'}, {'id': 9069, 'synset': 'mailboat.n.01', 'name': 'mailboat'}, {'id': 9070, 'synset': 'mail_car.n.01', 'name': 'mail_car'}, {'id': 9071, 'synset': 'maildrop.n.01', 'name': 'maildrop'}, {'id': 9072, 'synset': 'mailer.n.04', 'name': 'mailer'}, {'id': 9073, 'synset': 'maillot.n.02', 'name': 'maillot'}, {'id': 9074, 'synset': 'maillot.n.01', 'name': 'maillot'}, {'id': 9075, 'synset': 'mailsorter.n.01', 'name': 'mailsorter'}, {'id': 9076, 'synset': 'mail_train.n.01', 'name': 'mail_train'}, {'id': 9077, 'synset': 'mainframe.n.01', 'name': 'mainframe'}, {'id': 9078, 'synset': 'mainmast.n.01', 'name': 'mainmast'}, {'id': 9079, 'synset': 'main_rotor.n.01', 'name': 'main_rotor'}, {'id': 9080, 'synset': 'mainsail.n.01', 'name': 'mainsail'}, {'id': 9081, 'synset': 'mainspring.n.01', 'name': 'mainspring'}, {'id': 9082, 'synset': 'main-topmast.n.01', 'name': 'main-topmast'}, {'id': 9083, 'synset': 'main-topsail.n.01', 'name': 'main-topsail'}, {'id': 9084, 'synset': 'main_yard.n.01', 'name': 'main_yard'}, {'id': 9085, 'synset': 'maisonette.n.02', 'name': 'maisonette'}, {'id': 9086, 'synset': 'majolica.n.01', 'name': 'majolica'}, {'id': 
9087, 'synset': 'makeup.n.01', 'name': 'makeup'}, {'id': 9088, 'synset': 'maksutov_telescope.n.01', 'name': 'Maksutov_telescope'}, {'id': 9089, 'synset': 'malacca.n.02', 'name': 'malacca'}, {'id': 9090, 'synset': 'mallet.n.03', 'name': 'mallet'}, {'id': 9091, 'synset': 'mallet.n.02', 'name': 'mallet'}, {'id': 9092, 'synset': 'mammogram.n.01', 'name': 'mammogram'}, {'id': 9093, 'synset': 'mandola.n.01', 'name': 'mandola'}, {'id': 9094, 'synset': 'mandolin.n.01', 'name': 'mandolin'}, {'id': 9095, 'synset': 'mangle.n.01', 'name': 'mangle'}, {'id': 9096, 'synset': 'manhole_cover.n.01', 'name': 'manhole_cover'}, {'id': 9097, 'synset': 'man-of-war.n.01', 'name': 'man-of-war'}, {'id': 9098, 'synset': 'manometer.n.01', 'name': 'manometer'}, {'id': 9099, 'synset': 'manor.n.01', 'name': 'manor'}, {'id': 9100, 'synset': 'manor_hall.n.01', 'name': 'manor_hall'}, {'id': 9101, 'synset': 'manpad.n.01', 'name': 'MANPAD'}, {'id': 9102, 'synset': 'mansard.n.01', 'name': 'mansard'}, {'id': 9103, 'synset': 'manse.n.02', 'name': 'manse'}, {'id': 9104, 'synset': 'mansion.n.02', 'name': 'mansion'}, {'id': 9105, 'synset': 'mantel.n.01', 'name': 'mantel'}, {'id': 9106, 'synset': 'mantelet.n.02', 'name': 'mantelet'}, {'id': 9107, 'synset': 'mantilla.n.01', 'name': 'mantilla'}, {'id': 9108, 'synset': 'mao_jacket.n.01', 'name': 'Mao_jacket'}, {'id': 9109, 'synset': 'maquiladora.n.01', 'name': 'maquiladora'}, {'id': 9110, 'synset': 'maraca.n.01', 'name': 'maraca'}, {'id': 9111, 'synset': 'marble.n.02', 'name': 'marble'}, {'id': 9112, 'synset': 'marching_order.n.01', 'name': 'marching_order'}, {'id': 9113, 'synset': 'marimba.n.01', 'name': 'marimba'}, {'id': 9114, 'synset': 'marina.n.01', 'name': 'marina'}, {'id': 9115, 'synset': 'marketplace.n.02', 'name': 'marketplace'}, {'id': 9116, 'synset': 'marlinespike.n.01', 'name': 'marlinespike'}, {'id': 9117, 'synset': 'marocain.n.01', 'name': 'marocain'}, {'id': 9118, 'synset': 'marquee.n.02', 'name': 'marquee'}, {'id': 9119, 'synset': 'marquetry.n.01', 'name': 'marquetry'}, {'id': 9120, 'synset': 'marriage_bed.n.01', 'name': 'marriage_bed'}, {'id': 9121, 'synset': 'martello_tower.n.01', 'name': 'martello_tower'}, {'id': 9122, 'synset': 'martingale.n.01', 'name': 'martingale'}, {'id': 9123, 'synset': 'mascara.n.01', 'name': 'mascara'}, {'id': 9124, 'synset': 'maser.n.01', 'name': 'maser'}, {'id': 9125, 'synset': 'mashie.n.01', 'name': 'mashie'}, {'id': 9126, 'synset': 'mashie_niblick.n.01', 'name': 'mashie_niblick'}, {'id': 9127, 'synset': 'masjid.n.01', 'name': 'masjid'}, {'id': 9128, 'synset': 'mask.n.01', 'name': 'mask'}, {'id': 9129, 'synset': 'masonite.n.01', 'name': 'Masonite'}, {'id': 9130, 'synset': 'mason_jar.n.01', 'name': 'Mason_jar'}, {'id': 9131, 'synset': 'masonry.n.01', 'name': 'masonry'}, {'id': 9132, 'synset': "mason's_level.n.01", 'name': "mason's_level"}, {'id': 9133, 'synset': 'massage_parlor.n.02', 'name': 'massage_parlor'}, {'id': 9134, 'synset': 'massage_parlor.n.01', 'name': 'massage_parlor'}, {'id': 9135, 'synset': 'mass_spectrograph.n.01', 'name': 'mass_spectrograph'}, {'id': 9136, 'synset': 'mass_spectrometer.n.01', 'name': 'mass_spectrometer'}, {'id': 9137, 'synset': 'mast.n.04', 'name': 'mast'}, {'id': 9138, 'synset': 'mastaba.n.01', 'name': 'mastaba'}, {'id': 9139, 'synset': 'master_bedroom.n.01', 'name': 'master_bedroom'}, {'id': 9140, 'synset': 'masterpiece.n.01', 'name': 'masterpiece'}, {'id': 9141, 'synset': 'mat.n.01', 'name': 'mat'}, {'id': 9142, 'synset': 'match.n.01', 'name': 'match'}, {'id': 9143, 'synset': 'match.n.03', 'name': 
'match'}, {'id': 9144, 'synset': 'matchboard.n.01', 'name': 'matchboard'}, {'id': 9145, 'synset': 'matchbook.n.01', 'name': 'matchbook'}, {'id': 9146, 'synset': 'matchlock.n.01', 'name': 'matchlock'}, {'id': 9147, 'synset': 'match_plane.n.01', 'name': 'match_plane'}, {'id': 9148, 'synset': 'matchstick.n.01', 'name': 'matchstick'}, {'id': 9149, 'synset': 'material.n.04', 'name': 'material'}, {'id': 9150, 'synset': 'materiel.n.01', 'name': 'materiel'}, {'id': 9151, 'synset': 'maternity_hospital.n.01', 'name': 'maternity_hospital'}, {'id': 9152, 'synset': 'maternity_ward.n.01', 'name': 'maternity_ward'}, {'id': 9153, 'synset': 'matrix.n.06', 'name': 'matrix'}, {'id': 9154, 'synset': 'matthew_walker.n.01', 'name': 'Matthew_Walker'}, {'id': 9155, 'synset': 'matting.n.01', 'name': 'matting'}, {'id': 9156, 'synset': 'mattock.n.01', 'name': 'mattock'}, {'id': 9157, 'synset': 'mattress_cover.n.01', 'name': 'mattress_cover'}, {'id': 9158, 'synset': 'maul.n.01', 'name': 'maul'}, {'id': 9159, 'synset': 'maulstick.n.01', 'name': 'maulstick'}, {'id': 9160, 'synset': 'mauser.n.02', 'name': 'Mauser'}, {'id': 9161, 'synset': 'mausoleum.n.01', 'name': 'mausoleum'}, {'id': 9162, 'synset': 'maxi.n.01', 'name': 'maxi'}, {'id': 9163, 'synset': 'maxim_gun.n.01', 'name': 'Maxim_gun'}, {'id': 9164, 'synset': 'maximum_and_minimum_thermometer.n.01', 'name': 'maximum_and_minimum_thermometer'}, {'id': 9165, 'synset': 'maypole.n.01', 'name': 'maypole'}, {'id': 9166, 'synset': 'maze.n.01', 'name': 'maze'}, {'id': 9167, 'synset': 'mazer.n.01', 'name': 'mazer'}, {'id': 9168, 'synset': 'means.n.02', 'name': 'means'}, {'id': 9169, 'synset': 'measure.n.09', 'name': 'measure'}, {'id': 9170, 'synset': 'measuring_instrument.n.01', 'name': 'measuring_instrument'}, {'id': 9171, 'synset': 'meat_counter.n.01', 'name': 'meat_counter'}, {'id': 9172, 'synset': 'meat_grinder.n.01', 'name': 'meat_grinder'}, {'id': 9173, 'synset': 'meat_hook.n.01', 'name': 'meat_hook'}, {'id': 9174, 'synset': 'meat_house.n.02', 'name': 'meat_house'}, {'id': 9175, 'synset': 'meat_safe.n.01', 'name': 'meat_safe'}, {'id': 9176, 'synset': 'meat_thermometer.n.01', 'name': 'meat_thermometer'}, {'id': 9177, 'synset': 'mechanical_device.n.01', 'name': 'mechanical_device'}, {'id': 9178, 'synset': 'mechanical_piano.n.01', 'name': 'mechanical_piano'}, {'id': 9179, 'synset': 'mechanical_system.n.01', 'name': 'mechanical_system'}, {'id': 9180, 'synset': 'mechanism.n.05', 'name': 'mechanism'}, {'id': 9181, 'synset': 'medical_building.n.01', 'name': 'medical_building'}, {'id': 9182, 'synset': 'medical_instrument.n.01', 'name': 'medical_instrument'}, {'id': 9183, 'synset': 'medicine_ball.n.01', 'name': 'medicine_ball'}, {'id': 9184, 'synset': 'medicine_chest.n.01', 'name': 'medicine_chest'}, {'id': 9185, 'synset': 'medline.n.01', 'name': 'MEDLINE'}, {'id': 9186, 'synset': 'megalith.n.01', 'name': 'megalith'}, {'id': 9187, 'synset': 'megaphone.n.01', 'name': 'megaphone'}, {'id': 9188, 'synset': 'memorial.n.03', 'name': 'memorial'}, {'id': 9189, 'synset': 'memory.n.04', 'name': 'memory'}, {'id': 9190, 'synset': 'memory_chip.n.01', 'name': 'memory_chip'}, {'id': 9191, 'synset': 'memory_device.n.01', 'name': 'memory_device'}, {'id': 9192, 'synset': 'menagerie.n.02', 'name': 'menagerie'}, {'id': 9193, 'synset': 'mending.n.01', 'name': 'mending'}, {'id': 9194, 'synset': 'menhir.n.01', 'name': 'menhir'}, {'id': 9195, 'synset': 'menorah.n.02', 'name': 'menorah'}, {'id': 9196, 'synset': 'menorah.n.01', 'name': 'Menorah'}, {'id': 9197, 'synset': "man's_clothing.n.01", 'name': 
"man's_clothing"}, {'id': 9198, 'synset': "men's_room.n.01", 'name': "men's_room"}, {'id': 9199, 'synset': 'mercantile_establishment.n.01', 'name': 'mercantile_establishment'}, {'id': 9200, 'synset': 'mercury_barometer.n.01', 'name': 'mercury_barometer'}, {'id': 9201, 'synset': 'mercury_cell.n.01', 'name': 'mercury_cell'}, {'id': 9202, 'synset': 'mercury_thermometer.n.01', 'name': 'mercury_thermometer'}, {'id': 9203, 'synset': 'mercury-vapor_lamp.n.01', 'name': 'mercury-vapor_lamp'}, {'id': 9204, 'synset': 'mercy_seat.n.02', 'name': 'mercy_seat'}, {'id': 9205, 'synset': 'merlon.n.01', 'name': 'merlon'}, {'id': 9206, 'synset': 'mess.n.05', 'name': 'mess'}, {'id': 9207, 'synset': 'mess_jacket.n.01', 'name': 'mess_jacket'}, {'id': 9208, 'synset': 'mess_kit.n.01', 'name': 'mess_kit'}, {'id': 9209, 'synset': 'messuage.n.01', 'name': 'messuage'}, {'id': 9210, 'synset': 'metal_detector.n.01', 'name': 'metal_detector'}, {'id': 9211, 'synset': 'metallic.n.01', 'name': 'metallic'}, {'id': 9212, 'synset': 'metal_screw.n.01', 'name': 'metal_screw'}, {'id': 9213, 'synset': 'metal_wood.n.01', 'name': 'metal_wood'}, {'id': 9214, 'synset': 'meteorological_balloon.n.01', 'name': 'meteorological_balloon'}, {'id': 9215, 'synset': 'meter.n.02', 'name': 'meter'}, {'id': 9216, 'synset': 'meterstick.n.01', 'name': 'meterstick'}, {'id': 9217, 'synset': 'metronome.n.01', 'name': 'metronome'}, {'id': 9218, 'synset': 'mezzanine.n.02', 'name': 'mezzanine'}, {'id': 9219, 'synset': 'mezzanine.n.01', 'name': 'mezzanine'}, {'id': 9220, 'synset': 'microbalance.n.01', 'name': 'microbalance'}, {'id': 9221, 'synset': 'microbrewery.n.01', 'name': 'microbrewery'}, {'id': 9222, 'synset': 'microfiche.n.01', 'name': 'microfiche'}, {'id': 9223, 'synset': 'microfilm.n.01', 'name': 'microfilm'}, {'id': 9224, 'synset': 'micrometer.n.02', 'name': 'micrometer'}, {'id': 9225, 'synset': 'microprocessor.n.01', 'name': 'microprocessor'}, {'id': 9226, 'synset': 'microtome.n.01', 'name': 'microtome'}, {'id': 9227, 'synset': 'microwave_diathermy_machine.n.01', 'name': 'microwave_diathermy_machine'}, {'id': 9228, 'synset': 'microwave_linear_accelerator.n.01', 'name': 'microwave_linear_accelerator'}, {'id': 9229, 'synset': 'middy.n.01', 'name': 'middy'}, {'id': 9230, 'synset': 'midiron.n.01', 'name': 'midiron'}, {'id': 9231, 'synset': 'mihrab.n.02', 'name': 'mihrab'}, {'id': 9232, 'synset': 'mihrab.n.01', 'name': 'mihrab'}, {'id': 9233, 'synset': 'military_hospital.n.01', 'name': 'military_hospital'}, {'id': 9234, 'synset': 'military_quarters.n.01', 'name': 'military_quarters'}, {'id': 9235, 'synset': 'military_uniform.n.01', 'name': 'military_uniform'}, {'id': 9236, 'synset': 'military_vehicle.n.01', 'name': 'military_vehicle'}, {'id': 9237, 'synset': 'milk_bar.n.01', 'name': 'milk_bar'}, {'id': 9238, 'synset': 'milk_float.n.01', 'name': 'milk_float'}, {'id': 9239, 'synset': 'milking_machine.n.01', 'name': 'milking_machine'}, {'id': 9240, 'synset': 'milking_stool.n.01', 'name': 'milking_stool'}, {'id': 9241, 'synset': 'milk_wagon.n.01', 'name': 'milk_wagon'}, {'id': 9242, 'synset': 'mill.n.04', 'name': 'mill'}, {'id': 9243, 'synset': 'milldam.n.01', 'name': 'milldam'}, {'id': 9244, 'synset': 'miller.n.05', 'name': 'miller'}, {'id': 9245, 'synset': 'milliammeter.n.01', 'name': 'milliammeter'}, {'id': 9246, 'synset': 'millinery.n.02', 'name': 'millinery'}, {'id': 9247, 'synset': 'millinery.n.01', 'name': 'millinery'}, {'id': 9248, 'synset': 'milling.n.01', 'name': 'milling'}, {'id': 9249, 'synset': 'millivoltmeter.n.01', 'name': 
'millivoltmeter'}, {'id': 9250, 'synset': 'millstone.n.03', 'name': 'millstone'}, {'id': 9251, 'synset': 'millstone.n.02', 'name': 'millstone'}, {'id': 9252, 'synset': 'millwheel.n.01', 'name': 'millwheel'}, {'id': 9253, 'synset': 'mimeograph.n.01', 'name': 'mimeograph'}, {'id': 9254, 'synset': 'minaret.n.01', 'name': 'minaret'}, {'id': 9255, 'synset': 'mincer.n.01', 'name': 'mincer'}, {'id': 9256, 'synset': 'mine.n.02', 'name': 'mine'}, {'id': 9257, 'synset': 'mine_detector.n.01', 'name': 'mine_detector'}, {'id': 9258, 'synset': 'minelayer.n.01', 'name': 'minelayer'}, {'id': 9259, 'synset': 'mineshaft.n.01', 'name': 'mineshaft'}, {'id': 9260, 'synset': 'minibar.n.01', 'name': 'minibar'}, {'id': 9261, 'synset': 'minibike.n.01', 'name': 'minibike'}, {'id': 9262, 'synset': 'minibus.n.01', 'name': 'minibus'}, {'id': 9263, 'synset': 'minicar.n.01', 'name': 'minicar'}, {'id': 9264, 'synset': 'minicomputer.n.01', 'name': 'minicomputer'}, {'id': 9265, 'synset': 'ministry.n.02', 'name': 'ministry'}, {'id': 9266, 'synset': 'miniskirt.n.01', 'name': 'miniskirt'}, {'id': 9267, 'synset': 'minisub.n.01', 'name': 'minisub'}, {'id': 9268, 'synset': 'miniver.n.01', 'name': 'miniver'}, {'id': 9269, 'synset': 'mink.n.02', 'name': 'mink'}, {'id': 9270, 'synset': 'minster.n.01', 'name': 'minster'}, {'id': 9271, 'synset': 'mint.n.06', 'name': 'mint'}, {'id': 9272, 'synset': 'minute_hand.n.01', 'name': 'minute_hand'}, {'id': 9273, 'synset': 'minuteman.n.02', 'name': 'Minuteman'}, {'id': 9274, 'synset': 'missile.n.01', 'name': 'missile'}, {'id': 9275, 'synset': 'missile_defense_system.n.01', 'name': 'missile_defense_system'}, {'id': 9276, 'synset': 'miter_box.n.01', 'name': 'miter_box'}, {'id': 9277, 'synset': 'miter_joint.n.01', 'name': 'miter_joint'}, {'id': 9278, 'synset': 'mixer.n.03', 'name': 'mixer'}, {'id': 9279, 'synset': 'mixing_bowl.n.01', 'name': 'mixing_bowl'}, {'id': 9280, 'synset': 'mixing_faucet.n.01', 'name': 'mixing_faucet'}, {'id': 9281, 'synset': 'mizzen.n.02', 'name': 'mizzen'}, {'id': 9282, 'synset': 'mizzenmast.n.01', 'name': 'mizzenmast'}, {'id': 9283, 'synset': 'mobcap.n.01', 'name': 'mobcap'}, {'id': 9284, 'synset': 'mobile_home.n.01', 'name': 'mobile_home'}, {'id': 9285, 'synset': 'moccasin.n.01', 'name': 'moccasin'}, {'id': 9286, 'synset': 'mock-up.n.01', 'name': 'mock-up'}, {'id': 9287, 'synset': 'mod_con.n.01', 'name': 'mod_con'}, {'id': 9288, 'synset': 'model_t.n.01', 'name': 'Model_T'}, {'id': 9289, 'synset': 'modem.n.01', 'name': 'modem'}, {'id': 9290, 'synset': 'modillion.n.01', 'name': 'modillion'}, {'id': 9291, 'synset': 'module.n.03', 'name': 'module'}, {'id': 9292, 'synset': 'module.n.02', 'name': 'module'}, {'id': 9293, 'synset': 'mohair.n.01', 'name': 'mohair'}, {'id': 9294, 'synset': 'moire.n.01', 'name': 'moire'}, {'id': 9295, 'synset': 'mold.n.02', 'name': 'mold'}, {'id': 9296, 'synset': 'moldboard.n.01', 'name': 'moldboard'}, {'id': 9297, 'synset': 'moldboard_plow.n.01', 'name': 'moldboard_plow'}, {'id': 9298, 'synset': 'moleskin.n.01', 'name': 'moleskin'}, {'id': 9299, 'synset': 'molotov_cocktail.n.01', 'name': 'Molotov_cocktail'}, {'id': 9300, 'synset': 'monastery.n.01', 'name': 'monastery'}, {'id': 9301, 'synset': 'monastic_habit.n.01', 'name': 'monastic_habit'}, {'id': 9302, 'synset': 'moneybag.n.01', 'name': 'moneybag'}, {'id': 9303, 'synset': 'money_belt.n.01', 'name': 'money_belt'}, {'id': 9304, 'synset': 'monitor.n.06', 'name': 'monitor'}, {'id': 9305, 'synset': 'monitor.n.05', 'name': 'monitor'}, {'id': 9306, 'synset': 'monkey-wrench.n.01', 'name': 
'monkey-wrench'}, {'id': 9307, 'synset': "monk's_cloth.n.01", 'name': "monk's_cloth"}, {'id': 9308, 'synset': 'monochrome.n.01', 'name': 'monochrome'}, {'id': 9309, 'synset': 'monocle.n.01', 'name': 'monocle'}, {'id': 9310, 'synset': 'monofocal_lens_implant.n.01', 'name': 'monofocal_lens_implant'}, {'id': 9311, 'synset': 'monoplane.n.01', 'name': 'monoplane'}, {'id': 9312, 'synset': 'monotype.n.02', 'name': 'monotype'}, {'id': 9313, 'synset': 'monstrance.n.02', 'name': 'monstrance'}, {'id': 9314, 'synset': 'mooring_tower.n.01', 'name': 'mooring_tower'}, {'id': 9315, 'synset': 'moorish_arch.n.01', 'name': 'Moorish_arch'}, {'id': 9316, 'synset': 'moped.n.01', 'name': 'moped'}, {'id': 9317, 'synset': 'mop_handle.n.01', 'name': 'mop_handle'}, {'id': 9318, 'synset': 'moquette.n.01', 'name': 'moquette'}, {'id': 9319, 'synset': 'morgue.n.01', 'name': 'morgue'}, {'id': 9320, 'synset': 'morion.n.01', 'name': 'morion'}, {'id': 9321, 'synset': 'morning_dress.n.02', 'name': 'morning_dress'}, {'id': 9322, 'synset': 'morning_dress.n.01', 'name': 'morning_dress'}, {'id': 9323, 'synset': 'morning_room.n.01', 'name': 'morning_room'}, {'id': 9324, 'synset': 'morris_chair.n.01', 'name': 'Morris_chair'}, {'id': 9325, 'synset': 'mortar.n.01', 'name': 'mortar'}, {'id': 9326, 'synset': 'mortar.n.03', 'name': 'mortar'}, {'id': 9327, 'synset': 'mortarboard.n.02', 'name': 'mortarboard'}, {'id': 9328, 'synset': 'mortise_joint.n.02', 'name': 'mortise_joint'}, {'id': 9329, 'synset': 'mosaic.n.05', 'name': 'mosaic'}, {'id': 9330, 'synset': 'mosque.n.01', 'name': 'mosque'}, {'id': 9331, 'synset': 'mosquito_net.n.01', 'name': 'mosquito_net'}, {'id': 9332, 'synset': 'motel.n.01', 'name': 'motel'}, {'id': 9333, 'synset': 'motel_room.n.01', 'name': 'motel_room'}, {'id': 9334, 'synset': 'mother_hubbard.n.01', 'name': 'Mother_Hubbard'}, {'id': 9335, 'synset': 'motion-picture_camera.n.01', 'name': 'motion-picture_camera'}, {'id': 9336, 'synset': 'motion-picture_film.n.01', 'name': 'motion-picture_film'}, {'id': 9337, 'synset': 'motley.n.03', 'name': 'motley'}, {'id': 9338, 'synset': 'motley.n.02', 'name': 'motley'}, {'id': 9339, 'synset': 'motorboat.n.01', 'name': 'motorboat'}, {'id': 9340, 'synset': 'motor_hotel.n.01', 'name': 'motor_hotel'}, {'id': 9341, 'synset': 'motorized_wheelchair.n.01', 'name': 'motorized_wheelchair'}, {'id': 9342, 'synset': 'mound.n.04', 'name': 'mound'}, {'id': 9343, 'synset': 'mount.n.04', 'name': 'mount'}, {'id': 9344, 'synset': 'mountain_bike.n.01', 'name': 'mountain_bike'}, {'id': 9345, 'synset': 'mountain_tent.n.01', 'name': 'mountain_tent'}, {'id': 9346, 'synset': 'mouse_button.n.01', 'name': 'mouse_button'}, {'id': 9347, 'synset': 'mousetrap.n.01', 'name': 'mousetrap'}, {'id': 9348, 'synset': 'mousse.n.03', 'name': 'mousse'}, {'id': 9349, 'synset': 'mouthpiece.n.06', 'name': 'mouthpiece'}, {'id': 9350, 'synset': 'mouthpiece.n.02', 'name': 'mouthpiece'}, {'id': 9351, 'synset': 'mouthpiece.n.04', 'name': 'mouthpiece'}, {'id': 9352, 'synset': 'movement.n.10', 'name': 'movement'}, {'id': 9353, 'synset': 'movie_projector.n.01', 'name': 'movie_projector'}, {'id': 9354, 'synset': 'moving-coil_galvanometer.n.01', 'name': 'moving-coil_galvanometer'}, {'id': 9355, 'synset': 'moving_van.n.01', 'name': 'moving_van'}, {'id': 9356, 'synset': 'mud_brick.n.01', 'name': 'mud_brick'}, {'id': 9357, 'synset': 'mudguard.n.01', 'name': 'mudguard'}, {'id': 9358, 'synset': 'mudhif.n.01', 'name': 'mudhif'}, {'id': 9359, 'synset': 'muff.n.01', 'name': 'muff'}, {'id': 9360, 'synset': 'muffle.n.01', 'name': 'muffle'}, 
{'id': 9361, 'synset': 'muffler.n.02', 'name': 'muffler'}, {'id': 9362, 'synset': 'mufti.n.02', 'name': 'mufti'}, {'id': 9363, 'synset': 'mulch.n.01', 'name': 'mulch'}, {'id': 9364, 'synset': 'mule.n.02', 'name': 'mule'}, {'id': 9365, 'synset': 'multichannel_recorder.n.01', 'name': 'multichannel_recorder'}, {'id': 9366, 'synset': 'multiengine_airplane.n.01', 'name': 'multiengine_airplane'}, {'id': 9367, 'synset': 'multiplex.n.02', 'name': 'multiplex'}, {'id': 9368, 'synset': 'multiplexer.n.01', 'name': 'multiplexer'}, {'id': 9369, 'synset': 'multiprocessor.n.01', 'name': 'multiprocessor'}, {'id': 9370, 'synset': 'multistage_rocket.n.01', 'name': 'multistage_rocket'}, {'id': 9371, 'synset': 'munition.n.02', 'name': 'munition'}, {'id': 9372, 'synset': 'murphy_bed.n.01', 'name': 'Murphy_bed'}, {'id': 9373, 'synset': 'musette.n.01', 'name': 'musette'}, {'id': 9374, 'synset': 'musette_pipe.n.01', 'name': 'musette_pipe'}, {'id': 9375, 'synset': 'museum.n.01', 'name': 'museum'}, {'id': 9376, 'synset': 'mushroom_anchor.n.01', 'name': 'mushroom_anchor'}, {'id': 9377, 'synset': 'music_box.n.01', 'name': 'music_box'}, {'id': 9378, 'synset': 'music_hall.n.01', 'name': 'music_hall'}, {'id': 9379, 'synset': 'music_school.n.02', 'name': 'music_school'}, {'id': 9380, 'synset': 'music_stand.n.01', 'name': 'music_stand'}, {'id': 9381, 'synset': 'musket.n.01', 'name': 'musket'}, {'id': 9382, 'synset': 'musket_ball.n.01', 'name': 'musket_ball'}, {'id': 9383, 'synset': 'muslin.n.01', 'name': 'muslin'}, {'id': 9384, 'synset': 'mustache_cup.n.01', 'name': 'mustache_cup'}, {'id': 9385, 'synset': 'mustard_plaster.n.01', 'name': 'mustard_plaster'}, {'id': 9386, 'synset': 'mute.n.02', 'name': 'mute'}, {'id': 9387, 'synset': 'muzzle_loader.n.01', 'name': 'muzzle_loader'}, {'id': 9388, 'synset': 'muzzle.n.03', 'name': 'muzzle'}, {'id': 9389, 'synset': 'myelogram.n.01', 'name': 'myelogram'}, {'id': 9390, 'synset': 'nacelle.n.01', 'name': 'nacelle'}, {'id': 9391, 'synset': 'nail.n.02', 'name': 'nail'}, {'id': 9392, 'synset': 'nailbrush.n.01', 'name': 'nailbrush'}, {'id': 9393, 'synset': 'nailhead.n.02', 'name': 'nailhead'}, {'id': 9394, 'synset': 'nailhead.n.01', 'name': 'nailhead'}, {'id': 9395, 'synset': 'nail_polish.n.01', 'name': 'nail_polish'}, {'id': 9396, 'synset': 'nainsook.n.01', 'name': 'nainsook'}, {'id': 9397, 'synset': "napier's_bones.n.01", 'name': "Napier's_bones"}, {'id': 9398, 'synset': 'nard.n.01', 'name': 'nard'}, {'id': 9399, 'synset': 'narrowbody_aircraft.n.01', 'name': 'narrowbody_aircraft'}, {'id': 9400, 'synset': 'narrow_wale.n.01', 'name': 'narrow_wale'}, {'id': 9401, 'synset': 'narthex.n.02', 'name': 'narthex'}, {'id': 9402, 'synset': 'narthex.n.01', 'name': 'narthex'}, {'id': 9403, 'synset': 'nasotracheal_tube.n.01', 'name': 'nasotracheal_tube'}, {'id': 9404, 'synset': 'national_monument.n.01', 'name': 'national_monument'}, {'id': 9405, 'synset': 'nautilus.n.01', 'name': 'nautilus'}, {'id': 9406, 'synset': 'navigational_system.n.01', 'name': 'navigational_system'}, {'id': 9407, 'synset': 'naval_equipment.n.01', 'name': 'naval_equipment'}, {'id': 9408, 'synset': 'naval_gun.n.01', 'name': 'naval_gun'}, {'id': 9409, 'synset': 'naval_missile.n.01', 'name': 'naval_missile'}, {'id': 9410, 'synset': 'naval_radar.n.01', 'name': 'naval_radar'}, {'id': 9411, 'synset': 'naval_tactical_data_system.n.01', 'name': 'naval_tactical_data_system'}, {'id': 9412, 'synset': 'naval_weaponry.n.01', 'name': 'naval_weaponry'}, {'id': 9413, 'synset': 'nave.n.01', 'name': 'nave'}, {'id': 9414, 'synset': 
'navigational_instrument.n.01', 'name': 'navigational_instrument'}, {'id': 9415, 'synset': 'nebuchadnezzar.n.02', 'name': 'nebuchadnezzar'}, {'id': 9416, 'synset': 'neckband.n.01', 'name': 'neckband'}, {'id': 9417, 'synset': 'neck_brace.n.01', 'name': 'neck_brace'}, {'id': 9418, 'synset': 'neckcloth.n.01', 'name': 'neckcloth'}, {'id': 9419, 'synset': 'necklet.n.01', 'name': 'necklet'}, {'id': 9420, 'synset': 'neckline.n.01', 'name': 'neckline'}, {'id': 9421, 'synset': 'neckpiece.n.01', 'name': 'neckpiece'}, {'id': 9422, 'synset': 'neckwear.n.01', 'name': 'neckwear'}, {'id': 9423, 'synset': 'needle.n.02', 'name': 'needle'}, {'id': 9424, 'synset': 'needlenose_pliers.n.01', 'name': 'needlenose_pliers'}, {'id': 9425, 'synset': 'needlework.n.01', 'name': 'needlework'}, {'id': 9426, 'synset': 'negative.n.02', 'name': 'negative'}, {'id': 9427, 'synset': 'negative_magnetic_pole.n.01', 'name': 'negative_magnetic_pole'}, {'id': 9428, 'synset': 'negative_pole.n.01', 'name': 'negative_pole'}, {'id': 9429, 'synset': 'negligee.n.01', 'name': 'negligee'}, {'id': 9430, 'synset': 'neolith.n.01', 'name': 'neolith'}, {'id': 9431, 'synset': 'neon_lamp.n.01', 'name': 'neon_lamp'}, {'id': 9432, 'synset': 'nephoscope.n.01', 'name': 'nephoscope'}, {'id': 9433, 'synset': 'nest.n.05', 'name': 'nest'}, {'id': 9434, 'synset': 'nest_egg.n.02', 'name': 'nest_egg'}, {'id': 9435, 'synset': 'net.n.06', 'name': 'net'}, {'id': 9436, 'synset': 'net.n.02', 'name': 'net'}, {'id': 9437, 'synset': 'net.n.05', 'name': 'net'}, {'id': 9438, 'synset': 'net.n.04', 'name': 'net'}, {'id': 9439, 'synset': 'network.n.05', 'name': 'network'}, {'id': 9440, 'synset': 'network.n.04', 'name': 'network'}, {'id': 9441, 'synset': 'neutron_bomb.n.01', 'name': 'neutron_bomb'}, {'id': 9442, 'synset': 'newel.n.02', 'name': 'newel'}, {'id': 9443, 'synset': 'newel_post.n.01', 'name': 'newel_post'}, {'id': 9444, 'synset': 'newspaper.n.03', 'name': 'newspaper'}, {'id': 9445, 'synset': 'newsroom.n.03', 'name': 'newsroom'}, {'id': 9446, 'synset': 'newsroom.n.02', 'name': 'newsroom'}, {'id': 9447, 'synset': 'newtonian_telescope.n.01', 'name': 'Newtonian_telescope'}, {'id': 9448, 'synset': 'nib.n.01', 'name': 'nib'}, {'id': 9449, 'synset': 'niblick.n.01', 'name': 'niblick'}, {'id': 9450, 'synset': 'nicad.n.01', 'name': 'nicad'}, {'id': 9451, 'synset': 'nickel-iron_battery.n.01', 'name': 'nickel-iron_battery'}, {'id': 9452, 'synset': 'nicol_prism.n.01', 'name': 'Nicol_prism'}, {'id': 9453, 'synset': 'night_bell.n.01', 'name': 'night_bell'}, {'id': 9454, 'synset': 'nightcap.n.02', 'name': 'nightcap'}, {'id': 9455, 'synset': 'nightgown.n.01', 'name': 'nightgown'}, {'id': 9456, 'synset': 'night_latch.n.01', 'name': 'night_latch'}, {'id': 9457, 'synset': 'night-light.n.01', 'name': 'night-light'}, {'id': 9458, 'synset': 'nightshirt.n.01', 'name': 'nightshirt'}, {'id': 9459, 'synset': 'ninepin.n.01', 'name': 'ninepin'}, {'id': 9460, 'synset': 'ninepin_ball.n.01', 'name': 'ninepin_ball'}, {'id': 9461, 'synset': 'ninon.n.01', 'name': 'ninon'}, {'id': 9462, 'synset': 'nipple.n.02', 'name': 'nipple'}, {'id': 9463, 'synset': 'nipple_shield.n.01', 'name': 'nipple_shield'}, {'id': 9464, 'synset': 'niqab.n.01', 'name': 'niqab'}, {'id': 9465, 'synset': 'nissen_hut.n.01', 'name': 'Nissen_hut'}, {'id': 9466, 'synset': 'nogging.n.01', 'name': 'nogging'}, {'id': 9467, 'synset': 'noisemaker.n.01', 'name': 'noisemaker'}, {'id': 9468, 'synset': 'nonsmoker.n.02', 'name': 'nonsmoker'}, {'id': 9469, 'synset': 'non-volatile_storage.n.01', 'name': 'non-volatile_storage'}, {'id': 
9470, 'synset': 'norfolk_jacket.n.01', 'name': 'Norfolk_jacket'}, {'id': 9471, 'synset': 'noria.n.01', 'name': 'noria'}, {'id': 9472, 'synset': 'nose_flute.n.01', 'name': 'nose_flute'}, {'id': 9473, 'synset': 'nosewheel.n.01', 'name': 'nosewheel'}, {'id': 9474, 'synset': 'notebook.n.02', 'name': 'notebook'}, {'id': 9475, 'synset': 'nuclear-powered_ship.n.01', 'name': 'nuclear-powered_ship'}, {'id': 9476, 'synset': 'nuclear_reactor.n.01', 'name': 'nuclear_reactor'}, {'id': 9477, 'synset': 'nuclear_rocket.n.01', 'name': 'nuclear_rocket'}, {'id': 9478, 'synset': 'nuclear_weapon.n.01', 'name': 'nuclear_weapon'}, {'id': 9479, 'synset': 'nude.n.01', 'name': 'nude'}, {'id': 9480, 'synset': 'numdah.n.01', 'name': 'numdah'}, {'id': 9481, 'synset': "nun's_habit.n.01", 'name': "nun's_habit"}, {'id': 9482, 'synset': 'nursery.n.01', 'name': 'nursery'}, {'id': 9483, 'synset': 'nut_and_bolt.n.01', 'name': 'nut_and_bolt'}, {'id': 9484, 'synset': 'nylon.n.02', 'name': 'nylon'}, {'id': 9485, 'synset': 'nylons.n.01', 'name': 'nylons'}, {'id': 9486, 'synset': 'oast.n.01', 'name': 'oast'}, {'id': 9487, 'synset': 'oast_house.n.01', 'name': 'oast_house'}, {'id': 9488, 'synset': 'obelisk.n.01', 'name': 'obelisk'}, {'id': 9489, 'synset': 'object_ball.n.01', 'name': 'object_ball'}, {'id': 9490, 'synset': 'objective.n.02', 'name': 'objective'}, {'id': 9491, 'synset': 'oblique_bandage.n.01', 'name': 'oblique_bandage'}, {'id': 9492, 'synset': 'oboe.n.01', 'name': 'oboe'}, {'id': 9493, 'synset': 'oboe_da_caccia.n.01', 'name': 'oboe_da_caccia'}, {'id': 9494, 'synset': "oboe_d'amore.n.01", 'name': "oboe_d'amore"}, {'id': 9495, 'synset': 'observation_dome.n.01', 'name': 'observation_dome'}, {'id': 9496, 'synset': 'observatory.n.01', 'name': 'observatory'}, {'id': 9497, 'synset': 'obstacle.n.02', 'name': 'obstacle'}, {'id': 9498, 'synset': 'obturator.n.01', 'name': 'obturator'}, {'id': 9499, 'synset': 'ocarina.n.01', 'name': 'ocarina'}, {'id': 9500, 'synset': 'octant.n.01', 'name': 'octant'}, {'id': 9501, 'synset': 'odd-leg_caliper.n.01', 'name': 'odd-leg_caliper'}, {'id': 9502, 'synset': 'odometer.n.01', 'name': 'odometer'}, {'id': 9503, 'synset': 'oeil_de_boeuf.n.01', 'name': 'oeil_de_boeuf'}, {'id': 9504, 'synset': 'office.n.01', 'name': 'office'}, {'id': 9505, 'synset': 'office_building.n.01', 'name': 'office_building'}, {'id': 9506, 'synset': 'office_furniture.n.01', 'name': 'office_furniture'}, {'id': 9507, 'synset': "officer's_mess.n.01", 'name': "officer's_mess"}, {'id': 9508, 'synset': 'off-line_equipment.n.01', 'name': 'off-line_equipment'}, {'id': 9509, 'synset': 'ogee.n.01', 'name': 'ogee'}, {'id': 9510, 'synset': 'ogee_arch.n.01', 'name': 'ogee_arch'}, {'id': 9511, 'synset': 'ohmmeter.n.01', 'name': 'ohmmeter'}, {'id': 9512, 'synset': 'oil.n.02', 'name': 'oil'}, {'id': 9513, 'synset': 'oilcan.n.01', 'name': 'oilcan'}, {'id': 9514, 'synset': 'oilcloth.n.01', 'name': 'oilcloth'}, {'id': 9515, 'synset': 'oil_filter.n.01', 'name': 'oil_filter'}, {'id': 9516, 'synset': 'oil_heater.n.01', 'name': 'oil_heater'}, {'id': 9517, 'synset': 'oil_paint.n.01', 'name': 'oil_paint'}, {'id': 9518, 'synset': 'oil_pump.n.01', 'name': 'oil_pump'}, {'id': 9519, 'synset': 'oil_refinery.n.01', 'name': 'oil_refinery'}, {'id': 9520, 'synset': 'oilskin.n.01', 'name': 'oilskin'}, {'id': 9521, 'synset': 'oil_slick.n.01', 'name': 'oil_slick'}, {'id': 9522, 'synset': 'oilstone.n.01', 'name': 'oilstone'}, {'id': 9523, 'synset': 'oil_tanker.n.01', 'name': 'oil_tanker'}, {'id': 9524, 'synset': 'old_school_tie.n.01', 'name': 'old_school_tie'}, 
{'id': 9525, 'synset': 'olive_drab.n.03', 'name': 'olive_drab'}, {'id': 9526, 'synset': 'olive_drab.n.02', 'name': 'olive_drab'}, {'id': 9527, 'synset': 'olympian_zeus.n.01', 'name': 'Olympian_Zeus'}, {'id': 9528, 'synset': 'omelet_pan.n.01', 'name': 'omelet_pan'}, {'id': 9529, 'synset': 'omnidirectional_antenna.n.01', 'name': 'omnidirectional_antenna'}, {'id': 9530, 'synset': 'omnirange.n.01', 'name': 'omnirange'}, {'id': 9531, 'synset': 'onion_dome.n.01', 'name': 'onion_dome'}, {'id': 9532, 'synset': 'open-air_market.n.01', 'name': 'open-air_market'}, {'id': 9533, 'synset': 'open_circuit.n.01', 'name': 'open_circuit'}, {'id': 9534, 'synset': 'open-end_wrench.n.01', 'name': 'open-end_wrench'}, {'id': 9535, 'synset': 'opener.n.03', 'name': 'opener'}, {'id': 9536, 'synset': 'open-hearth_furnace.n.01', 'name': 'open-hearth_furnace'}, {'id': 9537, 'synset': 'openside_plane.n.01', 'name': 'openside_plane'}, {'id': 9538, 'synset': 'open_sight.n.01', 'name': 'open_sight'}, {'id': 9539, 'synset': 'openwork.n.01', 'name': 'openwork'}, {'id': 9540, 'synset': 'opera.n.03', 'name': 'opera'}, {'id': 9541, 'synset': 'opera_cloak.n.01', 'name': 'opera_cloak'}, {'id': 9542, 'synset': 'operating_microscope.n.01', 'name': 'operating_microscope'}, {'id': 9543, 'synset': 'operating_room.n.01', 'name': 'operating_room'}, {'id': 9544, 'synset': 'operating_table.n.01', 'name': 'operating_table'}, {'id': 9545, 'synset': 'ophthalmoscope.n.01', 'name': 'ophthalmoscope'}, {'id': 9546, 'synset': 'optical_device.n.01', 'name': 'optical_device'}, {'id': 9547, 'synset': 'optical_disk.n.01', 'name': 'optical_disk'}, {'id': 9548, 'synset': 'optical_instrument.n.01', 'name': 'optical_instrument'}, {'id': 9549, 'synset': 'optical_pyrometer.n.01', 'name': 'optical_pyrometer'}, {'id': 9550, 'synset': 'optical_telescope.n.01', 'name': 'optical_telescope'}, {'id': 9551, 'synset': 'orchestra_pit.n.01', 'name': 'orchestra_pit'}, {'id': 9552, 'synset': 'ordinary.n.04', 'name': 'ordinary'}, {'id': 9553, 'synset': 'organ.n.05', 'name': 'organ'}, {'id': 9554, 'synset': 'organdy.n.01', 'name': 'organdy'}, {'id': 9555, 'synset': 'organic_light-emitting_diode.n.01', 'name': 'organic_light-emitting_diode'}, {'id': 9556, 'synset': 'organ_loft.n.01', 'name': 'organ_loft'}, {'id': 9557, 'synset': 'organ_pipe.n.01', 'name': 'organ_pipe'}, {'id': 9558, 'synset': 'organza.n.01', 'name': 'organza'}, {'id': 9559, 'synset': 'oriel.n.01', 'name': 'oriel'}, {'id': 9560, 'synset': 'oriflamme.n.02', 'name': 'oriflamme'}, {'id': 9561, 'synset': 'o_ring.n.01', 'name': 'O_ring'}, {'id': 9562, 'synset': 'orlon.n.01', 'name': 'Orlon'}, {'id': 9563, 'synset': 'orlop_deck.n.01', 'name': 'orlop_deck'}, {'id': 9564, 'synset': 'orphanage.n.02', 'name': 'orphanage'}, {'id': 9565, 'synset': 'orphrey.n.01', 'name': 'orphrey'}, {'id': 9566, 'synset': 'orrery.n.01', 'name': 'orrery'}, {'id': 9567, 'synset': 'orthicon.n.01', 'name': 'orthicon'}, {'id': 9568, 'synset': 'orthochromatic_film.n.01', 'name': 'orthochromatic_film'}, {'id': 9569, 'synset': 'orthopter.n.01', 'name': 'orthopter'}, {'id': 9570, 'synset': 'orthoscope.n.01', 'name': 'orthoscope'}, {'id': 9571, 'synset': 'oscillograph.n.01', 'name': 'oscillograph'}, {'id': 9572, 'synset': 'oscilloscope.n.01', 'name': 'oscilloscope'}, {'id': 9573, 'synset': 'ossuary.n.01', 'name': 'ossuary'}, {'id': 9574, 'synset': 'otoscope.n.01', 'name': 'otoscope'}, {'id': 9575, 'synset': 'oubliette.n.01', 'name': 'oubliette'}, {'id': 9576, 'synset': 'out-basket.n.01', 'name': 'out-basket'}, {'id': 9577, 'synset': 
'outboard_motor.n.01', 'name': 'outboard_motor'}, {'id': 9578, 'synset': 'outboard_motorboat.n.01', 'name': 'outboard_motorboat'}, {'id': 9579, 'synset': 'outbuilding.n.01', 'name': 'outbuilding'}, {'id': 9580, 'synset': 'outerwear.n.01', 'name': 'outerwear'}, {'id': 9581, 'synset': 'outfall.n.01', 'name': 'outfall'}, {'id': 9582, 'synset': 'outfit.n.02', 'name': 'outfit'}, {'id': 9583, 'synset': 'outfitter.n.02', 'name': 'outfitter'}, {'id': 9584, 'synset': 'outhouse.n.01', 'name': 'outhouse'}, {'id': 9585, 'synset': 'output_device.n.01', 'name': 'output_device'}, {'id': 9586, 'synset': 'outrigger.n.01', 'name': 'outrigger'}, {'id': 9587, 'synset': 'outrigger_canoe.n.01', 'name': 'outrigger_canoe'}, {'id': 9588, 'synset': 'outside_caliper.n.01', 'name': 'outside_caliper'}, {'id': 9589, 'synset': 'outside_mirror.n.01', 'name': 'outside_mirror'}, {'id': 9590, 'synset': 'outwork.n.01', 'name': 'outwork'}, {'id': 9591, 'synset': 'oven_thermometer.n.01', 'name': 'oven_thermometer'}, {'id': 9592, 'synset': 'overall.n.02', 'name': 'overall'}, {'id': 9593, 'synset': 'overcoat.n.02', 'name': 'overcoat'}, {'id': 9594, 'synset': 'overdrive.n.02', 'name': 'overdrive'}, {'id': 9595, 'synset': 'overgarment.n.01', 'name': 'overgarment'}, {'id': 9596, 'synset': 'overhand_knot.n.01', 'name': 'overhand_knot'}, {'id': 9597, 'synset': 'overhang.n.01', 'name': 'overhang'}, {'id': 9598, 'synset': 'overhead_projector.n.01', 'name': 'overhead_projector'}, {'id': 9599, 'synset': 'overmantel.n.01', 'name': 'overmantel'}, {'id': 9600, 'synset': 'overnighter.n.02', 'name': 'overnighter'}, {'id': 9601, 'synset': 'overpass.n.01', 'name': 'overpass'}, {'id': 9602, 'synset': 'override.n.01', 'name': 'override'}, {'id': 9603, 'synset': 'overshoe.n.01', 'name': 'overshoe'}, {'id': 9604, 'synset': 'overskirt.n.01', 'name': 'overskirt'}, {'id': 9605, 'synset': 'oxbow.n.03', 'name': 'oxbow'}, {'id': 9606, 'synset': 'oxbridge.n.01', 'name': 'Oxbridge'}, {'id': 9607, 'synset': 'oxcart.n.01', 'name': 'oxcart'}, {'id': 9608, 'synset': 'oxeye.n.03', 'name': 'oxeye'}, {'id': 9609, 'synset': 'oxford.n.04', 'name': 'oxford'}, {'id': 9610, 'synset': 'oximeter.n.01', 'name': 'oximeter'}, {'id': 9611, 'synset': 'oxyacetylene_torch.n.01', 'name': 'oxyacetylene_torch'}, {'id': 9612, 'synset': 'oxygen_mask.n.01', 'name': 'oxygen_mask'}, {'id': 9613, 'synset': 'oyster_bar.n.01', 'name': 'oyster_bar'}, {'id': 9614, 'synset': 'oyster_bed.n.01', 'name': 'oyster_bed'}, {'id': 9615, 'synset': 'pace_car.n.01', 'name': 'pace_car'}, {'id': 9616, 'synset': 'pacemaker.n.03', 'name': 'pacemaker'}, {'id': 9617, 'synset': 'pack.n.03', 'name': 'pack'}, {'id': 9618, 'synset': 'pack.n.09', 'name': 'pack'}, {'id': 9619, 'synset': 'pack.n.07', 'name': 'pack'}, {'id': 9620, 'synset': 'package.n.02', 'name': 'package'}, {'id': 9621, 'synset': 'package_store.n.01', 'name': 'package_store'}, {'id': 9622, 'synset': 'packaging.n.03', 'name': 'packaging'}, {'id': 9623, 'synset': 'packing_box.n.02', 'name': 'packing_box'}, {'id': 9624, 'synset': 'packinghouse.n.02', 'name': 'packinghouse'}, {'id': 9625, 'synset': 'packinghouse.n.01', 'name': 'packinghouse'}, {'id': 9626, 'synset': 'packing_needle.n.01', 'name': 'packing_needle'}, {'id': 9627, 'synset': 'packsaddle.n.01', 'name': 'packsaddle'}, {'id': 9628, 'synset': 'paddle.n.02', 'name': 'paddle'}, {'id': 9629, 'synset': 'paddle.n.01', 'name': 'paddle'}, {'id': 9630, 'synset': 'paddle_box.n.01', 'name': 'paddle_box'}, {'id': 9631, 'synset': 'paddle_steamer.n.01', 'name': 'paddle_steamer'}, {'id': 9632, 'synset': 
'paddlewheel.n.01', 'name': 'paddlewheel'}, {'id': 9633, 'synset': 'paddock.n.01', 'name': 'paddock'}, {'id': 9634, 'synset': 'page_printer.n.01', 'name': 'page_printer'}, {'id': 9635, 'synset': 'paint.n.01', 'name': 'paint'}, {'id': 9636, 'synset': 'paintball.n.01', 'name': 'paintball'}, {'id': 9637, 'synset': 'paintball_gun.n.01', 'name': 'paintball_gun'}, {'id': 9638, 'synset': 'paintbox.n.01', 'name': 'paintbox'}, {'id': 9639, 'synset': 'paisley.n.01', 'name': 'paisley'}, {'id': 9640, 'synset': 'pajama.n.01', 'name': 'pajama'}, {'id': 9641, 'synset': 'palace.n.04', 'name': 'palace'}, {'id': 9642, 'synset': 'palace.n.01', 'name': 'palace'}, {'id': 9643, 'synset': 'palace.n.03', 'name': 'palace'}, {'id': 9644, 'synset': 'palanquin.n.01', 'name': 'palanquin'}, {'id': 9645, 'synset': 'paleolith.n.01', 'name': 'paleolith'}, {'id': 9646, 'synset': 'palestra.n.01', 'name': 'palestra'}, {'id': 9647, 'synset': 'palette_knife.n.01', 'name': 'palette_knife'}, {'id': 9648, 'synset': 'palisade.n.01', 'name': 'palisade'}, {'id': 9649, 'synset': 'pallet.n.03', 'name': 'pallet'}, {'id': 9650, 'synset': 'pallette.n.01', 'name': 'pallette'}, {'id': 9651, 'synset': 'pallium.n.04', 'name': 'pallium'}, {'id': 9652, 'synset': 'pallium.n.03', 'name': 'pallium'}, {'id': 9653, 'synset': 'pancake_turner.n.01', 'name': 'pancake_turner'}, {'id': 9654, 'synset': 'panchromatic_film.n.01', 'name': 'panchromatic_film'}, {'id': 9655, 'synset': 'panda_car.n.01', 'name': 'panda_car'}, {'id': 9656, 'synset': 'paneling.n.01', 'name': 'paneling'}, {'id': 9657, 'synset': 'panhandle.n.02', 'name': 'panhandle'}, {'id': 9658, 'synset': 'panic_button.n.01', 'name': 'panic_button'}, {'id': 9659, 'synset': 'pannier.n.02', 'name': 'pannier'}, {'id': 9660, 'synset': 'pannier.n.01', 'name': 'pannier'}, {'id': 9661, 'synset': 'pannikin.n.01', 'name': 'pannikin'}, {'id': 9662, 'synset': 'panopticon.n.02', 'name': 'panopticon'}, {'id': 9663, 'synset': 'panopticon.n.01', 'name': 'panopticon'}, {'id': 9664, 'synset': 'panpipe.n.01', 'name': 'panpipe'}, {'id': 9665, 'synset': 'pantaloon.n.03', 'name': 'pantaloon'}, {'id': 9666, 'synset': 'pantechnicon.n.01', 'name': 'pantechnicon'}, {'id': 9667, 'synset': 'pantheon.n.03', 'name': 'pantheon'}, {'id': 9668, 'synset': 'pantheon.n.02', 'name': 'pantheon'}, {'id': 9669, 'synset': 'pantie.n.01', 'name': 'pantie'}, {'id': 9670, 'synset': 'panting.n.02', 'name': 'panting'}, {'id': 9671, 'synset': 'pant_leg.n.01', 'name': 'pant_leg'}, {'id': 9672, 'synset': 'pantograph.n.01', 'name': 'pantograph'}, {'id': 9673, 'synset': 'pantry.n.01', 'name': 'pantry'}, {'id': 9674, 'synset': 'pants_suit.n.01', 'name': 'pants_suit'}, {'id': 9675, 'synset': 'panty_girdle.n.01', 'name': 'panty_girdle'}, {'id': 9676, 'synset': 'panzer.n.01', 'name': 'panzer'}, {'id': 9677, 'synset': 'paper_chain.n.01', 'name': 'paper_chain'}, {'id': 9678, 'synset': 'paper_clip.n.01', 'name': 'paper_clip'}, {'id': 9679, 'synset': 'paper_cutter.n.01', 'name': 'paper_cutter'}, {'id': 9680, 'synset': 'paper_fastener.n.01', 'name': 'paper_fastener'}, {'id': 9681, 'synset': 'paper_feed.n.01', 'name': 'paper_feed'}, {'id': 9682, 'synset': 'paper_mill.n.01', 'name': 'paper_mill'}, {'id': 9683, 'synset': 'parabolic_mirror.n.01', 'name': 'parabolic_mirror'}, {'id': 9684, 'synset': 'parabolic_reflector.n.01', 'name': 'parabolic_reflector'}, {'id': 9685, 'synset': 'parallel_bars.n.01', 'name': 'parallel_bars'}, {'id': 9686, 'synset': 'parallel_circuit.n.01', 'name': 'parallel_circuit'}, {'id': 9687, 'synset': 'parallel_interface.n.01', 'name': 
'parallel_interface'}, {'id': 9688, 'synset': 'parang.n.01', 'name': 'parang'}, {'id': 9689, 'synset': 'parapet.n.02', 'name': 'parapet'}, {'id': 9690, 'synset': 'parapet.n.01', 'name': 'parapet'}, {'id': 9691, 'synset': 'parer.n.02', 'name': 'parer'}, {'id': 9692, 'synset': 'parfait_glass.n.01', 'name': 'parfait_glass'}, {'id': 9693, 'synset': 'pargeting.n.02', 'name': 'pargeting'}, {'id': 9694, 'synset': 'pari-mutuel_machine.n.01', 'name': 'pari-mutuel_machine'}, {'id': 9695, 'synset': 'park_bench.n.01', 'name': 'park_bench'}, {'id': 9696, 'synset': 'parlor.n.01', 'name': 'parlor'}, {'id': 9697, 'synset': 'parquet.n.01', 'name': 'parquet'}, {'id': 9698, 'synset': 'parquetry.n.01', 'name': 'parquetry'}, {'id': 9699, 'synset': 'parsonage.n.01', 'name': 'parsonage'}, {'id': 9700, 'synset': 'parsons_table.n.01', 'name': 'Parsons_table'}, {'id': 9701, 'synset': 'partial_denture.n.01', 'name': 'partial_denture'}, {'id': 9702, 'synset': 'particle_detector.n.01', 'name': 'particle_detector'}, {'id': 9703, 'synset': 'partition.n.01', 'name': 'partition'}, {'id': 9704, 'synset': 'parts_bin.n.01', 'name': 'parts_bin'}, {'id': 9705, 'synset': 'party_line.n.02', 'name': 'party_line'}, {'id': 9706, 'synset': 'party_wall.n.01', 'name': 'party_wall'}, {'id': 9707, 'synset': 'parvis.n.01', 'name': 'parvis'}, {'id': 9708, 'synset': 'passenger_train.n.01', 'name': 'passenger_train'}, {'id': 9709, 'synset': 'passenger_van.n.01', 'name': 'passenger_van'}, {'id': 9710, 'synset': 'passe-partout.n.02', 'name': 'passe-partout'}, {'id': 9711, 'synset': 'passive_matrix_display.n.01', 'name': 'passive_matrix_display'}, {'id': 9712, 'synset': 'passkey.n.01', 'name': 'passkey'}, {'id': 9713, 'synset': 'pass-through.n.01', 'name': 'pass-through'}, {'id': 9714, 'synset': 'pastry_cart.n.01', 'name': 'pastry_cart'}, {'id': 9715, 'synset': 'patch.n.03', 'name': 'patch'}, {'id': 9716, 'synset': 'patchcord.n.01', 'name': 'patchcord'}, {'id': 9717, 'synset': 'patchouli.n.02', 'name': 'patchouli'}, {'id': 9718, 'synset': 'patch_pocket.n.01', 'name': 'patch_pocket'}, {'id': 9719, 'synset': 'patchwork.n.02', 'name': 'patchwork'}, {'id': 9720, 'synset': 'patent_log.n.01', 'name': 'patent_log'}, {'id': 9721, 'synset': 'paternoster.n.02', 'name': 'paternoster'}, {'id': 9722, 'synset': 'patina.n.01', 'name': 'patina'}, {'id': 9723, 'synset': 'patio.n.01', 'name': 'patio'}, {'id': 9724, 'synset': 'patisserie.n.01', 'name': 'patisserie'}, {'id': 9725, 'synset': 'patka.n.01', 'name': 'patka'}, {'id': 9726, 'synset': 'patrol_boat.n.01', 'name': 'patrol_boat'}, {'id': 9727, 'synset': 'patty-pan.n.01', 'name': 'patty-pan'}, {'id': 9728, 'synset': 'pave.n.01', 'name': 'pave'}, {'id': 9729, 'synset': 'pavilion.n.01', 'name': 'pavilion'}, {'id': 9730, 'synset': 'pavior.n.01', 'name': 'pavior'}, {'id': 9731, 'synset': 'pavis.n.01', 'name': 'pavis'}, {'id': 9732, 'synset': 'pawn.n.03', 'name': 'pawn'}, {'id': 9733, 'synset': "pawnbroker's_shop.n.01", 'name': "pawnbroker's_shop"}, {'id': 9734, 'synset': 'pay-phone.n.01', 'name': 'pay-phone'}, {'id': 9735, 'synset': 'pc_board.n.01', 'name': 'PC_board'}, {'id': 9736, 'synset': 'peach_orchard.n.01', 'name': 'peach_orchard'}, {'id': 9737, 'synset': 'pea_jacket.n.01', 'name': 'pea_jacket'}, {'id': 9738, 'synset': 'peavey.n.01', 'name': 'peavey'}, {'id': 9739, 'synset': 'pectoral.n.02', 'name': 'pectoral'}, {'id': 9740, 'synset': 'pedal.n.02', 'name': 'pedal'}, {'id': 9741, 'synset': 'pedal_pusher.n.01', 'name': 'pedal_pusher'}, {'id': 9742, 'synset': 'pedestal.n.03', 'name': 'pedestal'}, {'id': 
9743, 'synset': 'pedestal_table.n.01', 'name': 'pedestal_table'}, {'id': 9744, 'synset': 'pedestrian_crossing.n.01', 'name': 'pedestrian_crossing'}, {'id': 9745, 'synset': 'pedicab.n.01', 'name': 'pedicab'}, {'id': 9746, 'synset': 'pediment.n.01', 'name': 'pediment'}, {'id': 9747, 'synset': 'pedometer.n.01', 'name': 'pedometer'}, {'id': 9748, 'synset': 'peep_sight.n.01', 'name': 'peep_sight'}, {'id': 9749, 'synset': 'peg.n.01', 'name': 'peg'}, {'id': 9750, 'synset': 'peg.n.06', 'name': 'peg'}, {'id': 9751, 'synset': 'peg.n.05', 'name': 'peg'}, {'id': 9752, 'synset': 'pelham.n.01', 'name': 'Pelham'}, {'id': 9753, 'synset': 'pelican_crossing.n.01', 'name': 'pelican_crossing'}, {'id': 9754, 'synset': 'pelisse.n.01', 'name': 'pelisse'}, {'id': 9755, 'synset': 'pelvimeter.n.01', 'name': 'pelvimeter'}, {'id': 9756, 'synset': 'penal_colony.n.01', 'name': 'penal_colony'}, {'id': 9757, 'synset': 'penal_institution.n.01', 'name': 'penal_institution'}, {'id': 9758, 'synset': 'penalty_box.n.01', 'name': 'penalty_box'}, {'id': 9759, 'synset': 'pen-and-ink.n.01', 'name': 'pen-and-ink'}, {'id': 9760, 'synset': 'pencil.n.04', 'name': 'pencil'}, {'id': 9761, 'synset': 'pendant_earring.n.01', 'name': 'pendant_earring'}, {'id': 9762, 'synset': 'pendulum_clock.n.01', 'name': 'pendulum_clock'}, {'id': 9763, 'synset': 'pendulum_watch.n.01', 'name': 'pendulum_watch'}, {'id': 9764, 'synset': 'penetration_bomb.n.01', 'name': 'penetration_bomb'}, {'id': 9765, 'synset': 'penile_implant.n.01', 'name': 'penile_implant'}, {'id': 9766, 'synset': 'penitentiary.n.01', 'name': 'penitentiary'}, {'id': 9767, 'synset': 'penknife.n.01', 'name': 'penknife'}, {'id': 9768, 'synset': 'penlight.n.01', 'name': 'penlight'}, {'id': 9769, 'synset': 'pennant.n.03', 'name': 'pennant'}, {'id': 9770, 'synset': 'pennywhistle.n.01', 'name': 'pennywhistle'}, {'id': 9771, 'synset': 'penthouse.n.01', 'name': 'penthouse'}, {'id': 9772, 'synset': 'pentode.n.01', 'name': 'pentode'}, {'id': 9773, 'synset': 'peplos.n.01', 'name': 'peplos'}, {'id': 9774, 'synset': 'peplum.n.01', 'name': 'peplum'}, {'id': 9775, 'synset': 'pepper_shaker.n.01', 'name': 'pepper_shaker'}, {'id': 9776, 'synset': 'pepper_spray.n.01', 'name': 'pepper_spray'}, {'id': 9777, 'synset': 'percale.n.01', 'name': 'percale'}, {'id': 9778, 'synset': 'percolator.n.01', 'name': 'percolator'}, {'id': 9779, 'synset': 'percussion_cap.n.01', 'name': 'percussion_cap'}, {'id': 9780, 'synset': 'percussion_instrument.n.01', 'name': 'percussion_instrument'}, {'id': 9781, 'synset': 'perforation.n.01', 'name': 'perforation'}, {'id': 9782, 'synset': 'perfumery.n.03', 'name': 'perfumery'}, {'id': 9783, 'synset': 'perfumery.n.02', 'name': 'perfumery'}, {'id': 9784, 'synset': 'perfumery.n.01', 'name': 'perfumery'}, {'id': 9785, 'synset': 'peripheral.n.01', 'name': 'peripheral'}, {'id': 9786, 'synset': 'periscope.n.01', 'name': 'periscope'}, {'id': 9787, 'synset': 'peristyle.n.01', 'name': 'peristyle'}, {'id': 9788, 'synset': 'periwig.n.01', 'name': 'periwig'}, {'id': 9789, 'synset': 'permanent_press.n.01', 'name': 'permanent_press'}, {'id': 9790, 'synset': 'perpetual_motion_machine.n.01', 'name': 'perpetual_motion_machine'}, {'id': 9791, 'synset': 'personal_computer.n.01', 'name': 'personal_computer'}, {'id': 9792, 'synset': 'personal_digital_assistant.n.01', 'name': 'personal_digital_assistant'}, {'id': 9793, 'synset': 'personnel_carrier.n.01', 'name': 'personnel_carrier'}, {'id': 9794, 'synset': 'pestle.n.03', 'name': 'pestle'}, {'id': 9795, 'synset': 'pestle.n.02', 'name': 'pestle'}, {'id': 9796, 
'synset': 'petcock.n.01', 'name': 'petcock'}, {'id': 9797, 'synset': 'petri_dish.n.01', 'name': 'Petri_dish'}, {'id': 9798, 'synset': 'petrolatum_gauze.n.01', 'name': 'petrolatum_gauze'}, {'id': 9799, 'synset': 'pet_shop.n.01', 'name': 'pet_shop'}, {'id': 9800, 'synset': 'petticoat.n.01', 'name': 'petticoat'}, {'id': 9801, 'synset': 'phial.n.01', 'name': 'phial'}, {'id': 9802, 'synset': 'phillips_screw.n.01', 'name': 'Phillips_screw'}, {'id': 9803, 'synset': 'phillips_screwdriver.n.01', 'name': 'Phillips_screwdriver'}, {'id': 9804, 'synset': 'phonograph_needle.n.01', 'name': 'phonograph_needle'}, {'id': 9805, 'synset': 'photocathode.n.01', 'name': 'photocathode'}, {'id': 9806, 'synset': 'photocoagulator.n.01', 'name': 'photocoagulator'}, {'id': 9807, 'synset': 'photocopier.n.01', 'name': 'photocopier'}, {'id': 9808, 'synset': 'photographic_equipment.n.01', 'name': 'photographic_equipment'}, {'id': 9809, 'synset': 'photographic_paper.n.01', 'name': 'photographic_paper'}, {'id': 9810, 'synset': 'photometer.n.01', 'name': 'photometer'}, {'id': 9811, 'synset': 'photomicrograph.n.01', 'name': 'photomicrograph'}, {'id': 9812, 'synset': 'photostat.n.02', 'name': 'Photostat'}, {'id': 9813, 'synset': 'photostat.n.01', 'name': 'photostat'}, {'id': 9814, 'synset': 'physical_pendulum.n.01', 'name': 'physical_pendulum'}, {'id': 9815, 'synset': 'piano_action.n.01', 'name': 'piano_action'}, {'id': 9816, 'synset': 'piano_keyboard.n.01', 'name': 'piano_keyboard'}, {'id': 9817, 'synset': 'piano_wire.n.01', 'name': 'piano_wire'}, {'id': 9818, 'synset': 'piccolo.n.01', 'name': 'piccolo'}, {'id': 9819, 'synset': 'pick.n.07', 'name': 'pick'}, {'id': 9820, 'synset': 'pick.n.06', 'name': 'pick'}, {'id': 9821, 'synset': 'pick.n.05', 'name': 'pick'}, {'id': 9822, 'synset': 'pickelhaube.n.01', 'name': 'pickelhaube'}, {'id': 9823, 'synset': 'picket_boat.n.01', 'name': 'picket_boat'}, {'id': 9824, 'synset': 'picket_fence.n.01', 'name': 'picket_fence'}, {'id': 9825, 'synset': 'picket_ship.n.01', 'name': 'picket_ship'}, {'id': 9826, 'synset': 'pickle_barrel.n.01', 'name': 'pickle_barrel'}, {'id': 9827, 'synset': 'picture_frame.n.01', 'name': 'picture_frame'}, {'id': 9828, 'synset': 'picture_hat.n.01', 'name': 'picture_hat'}, {'id': 9829, 'synset': 'picture_rail.n.01', 'name': 'picture_rail'}, {'id': 9830, 'synset': 'picture_window.n.01', 'name': 'picture_window'}, {'id': 9831, 'synset': 'piece_of_cloth.n.01', 'name': 'piece_of_cloth'}, {'id': 9832, 'synset': 'pied-a-terre.n.01', 'name': 'pied-a-terre'}, {'id': 9833, 'synset': 'pier.n.03', 'name': 'pier'}, {'id': 9834, 'synset': 'pier.n.02', 'name': 'pier'}, {'id': 9835, 'synset': 'pier_arch.n.01', 'name': 'pier_arch'}, {'id': 9836, 'synset': 'pier_glass.n.01', 'name': 'pier_glass'}, {'id': 9837, 'synset': 'pier_table.n.01', 'name': 'pier_table'}, {'id': 9838, 'synset': 'pieta.n.01', 'name': 'pieta'}, {'id': 9839, 'synset': 'piezometer.n.01', 'name': 'piezometer'}, {'id': 9840, 'synset': 'pig_bed.n.01', 'name': 'pig_bed'}, {'id': 9841, 'synset': 'piggery.n.01', 'name': 'piggery'}, {'id': 9842, 'synset': 'pilaster.n.01', 'name': 'pilaster'}, {'id': 9843, 'synset': 'pile.n.06', 'name': 'pile'}, {'id': 9844, 'synset': 'pile_driver.n.01', 'name': 'pile_driver'}, {'id': 9845, 'synset': 'pill_bottle.n.01', 'name': 'pill_bottle'}, {'id': 9846, 'synset': 'pillbox.n.01', 'name': 'pillbox'}, {'id': 9847, 'synset': 'pillion.n.01', 'name': 'pillion'}, {'id': 9848, 'synset': 'pillory.n.01', 'name': 'pillory'}, {'id': 9849, 'synset': 'pillow_block.n.01', 'name': 'pillow_block'}, 
{'id': 9850, 'synset': 'pillow_lace.n.01', 'name': 'pillow_lace'}, {'id': 9851, 'synset': 'pillow_sham.n.01', 'name': 'pillow_sham'}, {'id': 9852, 'synset': 'pilot_bit.n.01', 'name': 'pilot_bit'}, {'id': 9853, 'synset': 'pilot_boat.n.01', 'name': 'pilot_boat'}, {'id': 9854, 'synset': 'pilot_burner.n.01', 'name': 'pilot_burner'}, {'id': 9855, 'synset': 'pilot_cloth.n.01', 'name': 'pilot_cloth'}, {'id': 9856, 'synset': 'pilot_engine.n.01', 'name': 'pilot_engine'}, {'id': 9857, 'synset': 'pilothouse.n.01', 'name': 'pilothouse'}, {'id': 9858, 'synset': 'pilot_light.n.02', 'name': 'pilot_light'}, {'id': 9859, 'synset': 'pin.n.08', 'name': 'pin'}, {'id': 9860, 'synset': 'pin.n.07', 'name': 'pin'}, {'id': 9861, 'synset': 'pinata.n.01', 'name': 'pinata'}, {'id': 9862, 'synset': 'pinball_machine.n.01', 'name': 'pinball_machine'}, {'id': 9863, 'synset': 'pince-nez.n.01', 'name': 'pince-nez'}, {'id': 9864, 'synset': 'pincer.n.01', 'name': 'pincer'}, {'id': 9865, 'synset': 'pinch_bar.n.01', 'name': 'pinch_bar'}, {'id': 9866, 'synset': 'pincurl_clip.n.01', 'name': 'pincurl_clip'}, {'id': 9867, 'synset': 'pinfold.n.01', 'name': 'pinfold'}, {'id': 9868, 'synset': 'pinhead.n.02', 'name': 'pinhead'}, {'id': 9869, 'synset': 'pinion.n.01', 'name': 'pinion'}, {'id': 9870, 'synset': 'pinnacle.n.01', 'name': 'pinnacle'}, {'id': 9871, 'synset': 'pinprick.n.02', 'name': 'pinprick'}, {'id': 9872, 'synset': 'pinstripe.n.03', 'name': 'pinstripe'}, {'id': 9873, 'synset': 'pinstripe.n.02', 'name': 'pinstripe'}, {'id': 9874, 'synset': 'pinstripe.n.01', 'name': 'pinstripe'}, {'id': 9875, 'synset': 'pintle.n.01', 'name': 'pintle'}, {'id': 9876, 'synset': 'pinwheel.n.02', 'name': 'pinwheel'}, {'id': 9877, 'synset': 'tabor_pipe.n.01', 'name': 'tabor_pipe'}, {'id': 9878, 'synset': 'pipe.n.04', 'name': 'pipe'}, {'id': 9879, 'synset': 'pipe_bomb.n.01', 'name': 'pipe_bomb'}, {'id': 9880, 'synset': 'pipe_cleaner.n.01', 'name': 'pipe_cleaner'}, {'id': 9881, 'synset': 'pipe_cutter.n.01', 'name': 'pipe_cutter'}, {'id': 9882, 'synset': 'pipefitting.n.01', 'name': 'pipefitting'}, {'id': 9883, 'synset': 'pipet.n.01', 'name': 'pipet'}, {'id': 9884, 'synset': 'pipe_vise.n.01', 'name': 'pipe_vise'}, {'id': 9885, 'synset': 'pipe_wrench.n.01', 'name': 'pipe_wrench'}, {'id': 9886, 'synset': 'pique.n.01', 'name': 'pique'}, {'id': 9887, 'synset': 'pirate.n.03', 'name': 'pirate'}, {'id': 9888, 'synset': 'piste.n.02', 'name': 'piste'}, {'id': 9889, 'synset': 'pistol_grip.n.01', 'name': 'pistol_grip'}, {'id': 9890, 'synset': 'piston.n.02', 'name': 'piston'}, {'id': 9891, 'synset': 'piston_ring.n.01', 'name': 'piston_ring'}, {'id': 9892, 'synset': 'piston_rod.n.01', 'name': 'piston_rod'}, {'id': 9893, 'synset': 'pit.n.07', 'name': 'pit'}, {'id': 9894, 'synset': 'pitching_wedge.n.01', 'name': 'pitching_wedge'}, {'id': 9895, 'synset': 'pitch_pipe.n.01', 'name': 'pitch_pipe'}, {'id': 9896, 'synset': 'pith_hat.n.01', 'name': 'pith_hat'}, {'id': 9897, 'synset': 'piton.n.01', 'name': 'piton'}, {'id': 9898, 'synset': 'pitot-static_tube.n.01', 'name': 'Pitot-static_tube'}, {'id': 9899, 'synset': 'pitot_tube.n.01', 'name': 'Pitot_tube'}, {'id': 9900, 'synset': 'pitsaw.n.01', 'name': 'pitsaw'}, {'id': 9901, 'synset': 'pivot.n.02', 'name': 'pivot'}, {'id': 9902, 'synset': 'pivoting_window.n.01', 'name': 'pivoting_window'}, {'id': 9903, 'synset': 'pizzeria.n.01', 'name': 'pizzeria'}, {'id': 9904, 'synset': 'place_of_business.n.01', 'name': 'place_of_business'}, {'id': 9905, 'synset': 'place_of_worship.n.01', 'name': 'place_of_worship'}, {'id': 9906, 
'synset': 'placket.n.01', 'name': 'placket'}, {'id': 9907, 'synset': 'planchet.n.01', 'name': 'planchet'}, {'id': 9908, 'synset': 'plane.n.05', 'name': 'plane'}, {'id': 9909, 'synset': 'plane.n.04', 'name': 'plane'}, {'id': 9910, 'synset': 'plane_seat.n.01', 'name': 'plane_seat'}, {'id': 9911, 'synset': 'planetarium.n.03', 'name': 'planetarium'}, {'id': 9912, 'synset': 'planetarium.n.02', 'name': 'planetarium'}, {'id': 9913, 'synset': 'planetarium.n.01', 'name': 'planetarium'}, {'id': 9914, 'synset': 'planetary_gear.n.01', 'name': 'planetary_gear'}, {'id': 9915, 'synset': 'plank-bed.n.01', 'name': 'plank-bed'}, {'id': 9916, 'synset': 'planking.n.02', 'name': 'planking'}, {'id': 9917, 'synset': 'planner.n.02', 'name': 'planner'}, {'id': 9918, 'synset': 'plant.n.01', 'name': 'plant'}, {'id': 9919, 'synset': 'planter.n.03', 'name': 'planter'}, {'id': 9920, 'synset': 'plaster.n.05', 'name': 'plaster'}, {'id': 9921, 'synset': 'plasterboard.n.01', 'name': 'plasterboard'}, {'id': 9922, 'synset': 'plastering_trowel.n.01', 'name': 'plastering_trowel'}, {'id': 9923, 'synset': 'plastic_bag.n.01', 'name': 'plastic_bag'}, {'id': 9924, 'synset': 'plastic_bomb.n.01', 'name': 'plastic_bomb'}, {'id': 9925, 'synset': 'plastic_laminate.n.01', 'name': 'plastic_laminate'}, {'id': 9926, 'synset': 'plastic_wrap.n.01', 'name': 'plastic_wrap'}, {'id': 9927, 'synset': 'plastron.n.03', 'name': 'plastron'}, {'id': 9928, 'synset': 'plastron.n.02', 'name': 'plastron'}, {'id': 9929, 'synset': 'plastron.n.01', 'name': 'plastron'}, {'id': 9930, 'synset': 'plate.n.14', 'name': 'plate'}, {'id': 9931, 'synset': 'plate.n.13', 'name': 'plate'}, {'id': 9932, 'synset': 'plate.n.12', 'name': 'plate'}, {'id': 9933, 'synset': 'platen.n.03', 'name': 'platen'}, {'id': 9934, 'synset': 'platen.n.01', 'name': 'platen'}, {'id': 9935, 'synset': 'plate_rack.n.01', 'name': 'plate_rack'}, {'id': 9936, 'synset': 'plate_rail.n.01', 'name': 'plate_rail'}, {'id': 9937, 'synset': 'platform.n.01', 'name': 'platform'}, {'id': 9938, 'synset': 'platform.n.04', 'name': 'platform'}, {'id': 9939, 'synset': 'platform.n.03', 'name': 'platform'}, {'id': 9940, 'synset': 'platform_bed.n.01', 'name': 'platform_bed'}, {'id': 9941, 'synset': 'platform_rocker.n.01', 'name': 'platform_rocker'}, {'id': 9942, 'synset': 'plating.n.01', 'name': 'plating'}, {'id': 9943, 'synset': 'playback.n.02', 'name': 'playback'}, {'id': 9944, 'synset': 'playbox.n.01', 'name': 'playbox'}, {'id': 9945, 'synset': 'playground.n.02', 'name': 'playground'}, {'id': 9946, 'synset': 'playsuit.n.01', 'name': 'playsuit'}, {'id': 9947, 'synset': 'plaza.n.02', 'name': 'plaza'}, {'id': 9948, 'synset': 'pleat.n.01', 'name': 'pleat'}, {'id': 9949, 'synset': 'plenum.n.02', 'name': 'plenum'}, {'id': 9950, 'synset': 'plethysmograph.n.01', 'name': 'plethysmograph'}, {'id': 9951, 'synset': 'pleximeter.n.01', 'name': 'pleximeter'}, {'id': 9952, 'synset': 'plexor.n.01', 'name': 'plexor'}, {'id': 9953, 'synset': 'plimsoll.n.02', 'name': 'plimsoll'}, {'id': 9954, 'synset': 'plotter.n.04', 'name': 'plotter'}, {'id': 9955, 'synset': 'plug.n.01', 'name': 'plug'}, {'id': 9956, 'synset': 'plug.n.05', 'name': 'plug'}, {'id': 9957, 'synset': 'plug_fuse.n.01', 'name': 'plug_fuse'}, {'id': 9958, 'synset': 'plughole.n.01', 'name': 'plughole'}, {'id': 9959, 'synset': 'plumb_bob.n.01', 'name': 'plumb_bob'}, {'id': 9960, 'synset': 'plumb_level.n.01', 'name': 'plumb_level'}, {'id': 9961, 'synset': 'plunger.n.03', 'name': 'plunger'}, {'id': 9962, 'synset': 'plus_fours.n.01', 'name': 'plus_fours'}, {'id': 9963, 'synset': 
'plush.n.01', 'name': 'plush'}, {'id': 9964, 'synset': 'plywood.n.01', 'name': 'plywood'}, {'id': 9965, 'synset': 'pneumatic_drill.n.01', 'name': 'pneumatic_drill'}, {'id': 9966, 'synset': 'p-n_junction.n.01', 'name': 'p-n_junction'}, {'id': 9967, 'synset': 'p-n-p_transistor.n.01', 'name': 'p-n-p_transistor'}, {'id': 9968, 'synset': 'poacher.n.02', 'name': 'poacher'}, {'id': 9969, 'synset': 'pocket.n.01', 'name': 'pocket'}, {'id': 9970, 'synset': 'pocket_battleship.n.01', 'name': 'pocket_battleship'}, {'id': 9971, 'synset': 'pocketcomb.n.01', 'name': 'pocketcomb'}, {'id': 9972, 'synset': 'pocket_flap.n.01', 'name': 'pocket_flap'}, {'id': 9973, 'synset': 'pocket-handkerchief.n.01', 'name': 'pocket-handkerchief'}, {'id': 9974, 'synset': 'pod.n.04', 'name': 'pod'}, {'id': 9975, 'synset': 'pogo_stick.n.01', 'name': 'pogo_stick'}, {'id': 9976, 'synset': 'point-and-shoot_camera.n.01', 'name': 'point-and-shoot_camera'}, {'id': 9977, 'synset': 'pointed_arch.n.01', 'name': 'pointed_arch'}, {'id': 9978, 'synset': 'pointing_trowel.n.01', 'name': 'pointing_trowel'}, {'id': 9979, 'synset': 'point_lace.n.01', 'name': 'point_lace'}, {'id': 9980, 'synset': 'polarimeter.n.01', 'name': 'polarimeter'}, {'id': 9981, 'synset': 'polaroid.n.01', 'name': 'Polaroid'}, {'id': 9982, 'synset': 'polaroid_camera.n.01', 'name': 'Polaroid_camera'}, {'id': 9983, 'synset': 'pole.n.09', 'name': 'pole'}, {'id': 9984, 'synset': 'poleax.n.02', 'name': 'poleax'}, {'id': 9985, 'synset': 'poleax.n.01', 'name': 'poleax'}, {'id': 9986, 'synset': 'police_boat.n.01', 'name': 'police_boat'}, {'id': 9987, 'synset': 'police_van.n.01', 'name': 'police_van'}, {'id': 9988, 'synset': 'polling_booth.n.01', 'name': 'polling_booth'}, {'id': 9989, 'synset': 'polo_ball.n.01', 'name': 'polo_ball'}, {'id': 9990, 'synset': 'polo_mallet.n.01', 'name': 'polo_mallet'}, {'id': 9991, 'synset': 'polonaise.n.01', 'name': 'polonaise'}, {'id': 9992, 'synset': 'polyester.n.03', 'name': 'polyester'}, {'id': 9993, 'synset': 'polygraph.n.01', 'name': 'polygraph'}, {'id': 9994, 'synset': 'pomade.n.01', 'name': 'pomade'}, {'id': 9995, 'synset': 'pommel_horse.n.01', 'name': 'pommel_horse'}, {'id': 9996, 'synset': 'pongee.n.01', 'name': 'pongee'}, {'id': 9997, 'synset': 'poniard.n.01', 'name': 'poniard'}, {'id': 9998, 'synset': 'pontifical.n.01', 'name': 'pontifical'}, {'id': 9999, 'synset': 'pontoon.n.01', 'name': 'pontoon'}, {'id': 10000, 'synset': 'pontoon_bridge.n.01', 'name': 'pontoon_bridge'}, {'id': 10001, 'synset': 'pony_cart.n.01', 'name': 'pony_cart'}, {'id': 10002, 'synset': 'pool_ball.n.01', 'name': 'pool_ball'}, {'id': 10003, 'synset': 'poolroom.n.01', 'name': 'poolroom'}, {'id': 10004, 'synset': 'poop_deck.n.01', 'name': 'poop_deck'}, {'id': 10005, 'synset': 'poor_box.n.01', 'name': 'poor_box'}, {'id': 10006, 'synset': 'poorhouse.n.01', 'name': 'poorhouse'}, {'id': 10007, 'synset': 'pop_bottle.n.01', 'name': 'pop_bottle'}, {'id': 10008, 'synset': 'popgun.n.01', 'name': 'popgun'}, {'id': 10009, 'synset': 'poplin.n.01', 'name': 'poplin'}, {'id': 10010, 'synset': 'popper.n.03', 'name': 'popper'}, {'id': 10011, 'synset': 'poppet.n.01', 'name': 'poppet'}, {'id': 10012, 'synset': 'pop_tent.n.01', 'name': 'pop_tent'}, {'id': 10013, 'synset': 'porcelain.n.01', 'name': 'porcelain'}, {'id': 10014, 'synset': 'porch.n.01', 'name': 'porch'}, {'id': 10015, 'synset': 'porkpie.n.01', 'name': 'porkpie'}, {'id': 10016, 'synset': 'porringer.n.01', 'name': 'porringer'}, {'id': 10017, 'synset': 'portable.n.01', 'name': 'portable'}, {'id': 10018, 'synset': 
'portable_computer.n.01', 'name': 'portable_computer'}, {'id': 10019, 'synset': 'portable_circular_saw.n.01', 'name': 'portable_circular_saw'}, {'id': 10020, 'synset': 'portcullis.n.01', 'name': 'portcullis'}, {'id': 10021, 'synset': 'porte-cochere.n.02', 'name': 'porte-cochere'}, {'id': 10022, 'synset': 'porte-cochere.n.01', 'name': 'porte-cochere'}, {'id': 10023, 'synset': 'portfolio.n.01', 'name': 'portfolio'}, {'id': 10024, 'synset': 'porthole.n.01', 'name': 'porthole'}, {'id': 10025, 'synset': 'portico.n.01', 'name': 'portico'}, {'id': 10026, 'synset': 'portiere.n.01', 'name': 'portiere'}, {'id': 10027, 'synset': 'portmanteau.n.02', 'name': 'portmanteau'}, {'id': 10028, 'synset': 'portrait_camera.n.01', 'name': 'portrait_camera'}, {'id': 10029, 'synset': 'portrait_lens.n.01', 'name': 'portrait_lens'}, {'id': 10030, 'synset': 'positive_pole.n.02', 'name': 'positive_pole'}, {'id': 10031, 'synset': 'positive_pole.n.01', 'name': 'positive_pole'}, {'id': 10032, 'synset': 'positron_emission_tomography_scanner.n.01', 'name': 'positron_emission_tomography_scanner'}, {'id': 10033, 'synset': 'post.n.04', 'name': 'post'}, {'id': 10034, 'synset': 'postage_meter.n.01', 'name': 'postage_meter'}, {'id': 10035, 'synset': 'post_and_lintel.n.01', 'name': 'post_and_lintel'}, {'id': 10036, 'synset': 'post_chaise.n.01', 'name': 'post_chaise'}, {'id': 10037, 'synset': 'postern.n.01', 'name': 'postern'}, {'id': 10038, 'synset': 'post_exchange.n.01', 'name': 'post_exchange'}, {'id': 10039, 'synset': 'posthole_digger.n.01', 'name': 'posthole_digger'}, {'id': 10040, 'synset': 'post_horn.n.01', 'name': 'post_horn'}, {'id': 10041, 'synset': 'posthouse.n.01', 'name': 'posthouse'}, {'id': 10042, 'synset': 'potbelly.n.02', 'name': 'potbelly'}, {'id': 10043, 'synset': 'potemkin_village.n.01', 'name': 'Potemkin_village'}, {'id': 10044, 'synset': 'potential_divider.n.01', 'name': 'potential_divider'}, {'id': 10045, 'synset': 'potentiometer.n.02', 'name': 'potentiometer'}, {'id': 10046, 'synset': 'potentiometer.n.01', 'name': 'potentiometer'}, {'id': 10047, 'synset': 'potpourri.n.03', 'name': 'potpourri'}, {'id': 10048, 'synset': 'potsherd.n.01', 'name': 'potsherd'}, {'id': 10049, 'synset': "potter's_wheel.n.01", 'name': "potter's_wheel"}, {'id': 10050, 'synset': 'pottle.n.01', 'name': 'pottle'}, {'id': 10051, 'synset': 'potty_seat.n.01', 'name': 'potty_seat'}, {'id': 10052, 'synset': 'poultice.n.01', 'name': 'poultice'}, {'id': 10053, 'synset': 'pound.n.13', 'name': 'pound'}, {'id': 10054, 'synset': 'pound_net.n.01', 'name': 'pound_net'}, {'id': 10055, 'synset': 'powder.n.03', 'name': 'powder'}, {'id': 10056, 'synset': 'powder_and_shot.n.01', 'name': 'powder_and_shot'}, {'id': 10057, 'synset': 'powdered_mustard.n.01', 'name': 'powdered_mustard'}, {'id': 10058, 'synset': 'powder_horn.n.01', 'name': 'powder_horn'}, {'id': 10059, 'synset': 'powder_keg.n.02', 'name': 'powder_keg'}, {'id': 10060, 'synset': 'power_brake.n.01', 'name': 'power_brake'}, {'id': 10061, 'synset': 'power_cord.n.01', 'name': 'power_cord'}, {'id': 10062, 'synset': 'power_drill.n.01', 'name': 'power_drill'}, {'id': 10063, 'synset': 'power_line.n.01', 'name': 'power_line'}, {'id': 10064, 'synset': 'power_loom.n.01', 'name': 'power_loom'}, {'id': 10065, 'synset': 'power_mower.n.01', 'name': 'power_mower'}, {'id': 10066, 'synset': 'power_pack.n.01', 'name': 'power_pack'}, {'id': 10067, 'synset': 'power_saw.n.01', 'name': 'power_saw'}, {'id': 10068, 'synset': 'power_steering.n.01', 'name': 'power_steering'}, {'id': 10069, 'synset': 'power_takeoff.n.01', 
'name': 'power_takeoff'}, {'id': 10070, 'synset': 'power_tool.n.01', 'name': 'power_tool'}, {'id': 10071, 'synset': 'praetorium.n.01', 'name': 'praetorium'}, {'id': 10072, 'synset': 'prayer_rug.n.01', 'name': 'prayer_rug'}, {'id': 10073, 'synset': 'prayer_shawl.n.01', 'name': 'prayer_shawl'}, {'id': 10074, 'synset': 'precipitator.n.01', 'name': 'precipitator'}, {'id': 10075, 'synset': 'prefab.n.01', 'name': 'prefab'}, {'id': 10076, 'synset': 'presbytery.n.01', 'name': 'presbytery'}, {'id': 10077, 'synset': 'presence_chamber.n.01', 'name': 'presence_chamber'}, {'id': 10078, 'synset': 'press.n.07', 'name': 'press'}, {'id': 10079, 'synset': 'press.n.03', 'name': 'press'}, {'id': 10080, 'synset': 'press.n.06', 'name': 'press'}, {'id': 10081, 'synset': 'press_box.n.01', 'name': 'press_box'}, {'id': 10082, 'synset': 'press_gallery.n.01', 'name': 'press_gallery'}, {'id': 10083, 'synset': 'press_of_sail.n.01', 'name': 'press_of_sail'}, {'id': 10084, 'synset': 'pressure_cabin.n.01', 'name': 'pressure_cabin'}, {'id': 10085, 'synset': 'pressure_cooker.n.01', 'name': 'pressure_cooker'}, {'id': 10086, 'synset': 'pressure_dome.n.01', 'name': 'pressure_dome'}, {'id': 10087, 'synset': 'pressure_gauge.n.01', 'name': 'pressure_gauge'}, {'id': 10088, 'synset': 'pressurized_water_reactor.n.01', 'name': 'pressurized_water_reactor'}, {'id': 10089, 'synset': 'pressure_suit.n.01', 'name': 'pressure_suit'}, {'id': 10090, 'synset': 'pricket.n.01', 'name': 'pricket'}, {'id': 10091, 'synset': 'prie-dieu.n.01', 'name': 'prie-dieu'}, {'id': 10092, 'synset': 'primary_coil.n.01', 'name': 'primary_coil'}, {'id': 10093, 'synset': 'primus_stove.n.01', 'name': 'Primus_stove'}, {'id': 10094, 'synset': 'prince_albert.n.02', 'name': 'Prince_Albert'}, {'id': 10095, 'synset': 'print.n.06', 'name': 'print'}, {'id': 10096, 'synset': 'print_buffer.n.01', 'name': 'print_buffer'}, {'id': 10097, 'synset': 'printed_circuit.n.01', 'name': 'printed_circuit'}, {'id': 10098, 'synset': 'printer.n.02', 'name': 'printer'}, {'id': 10099, 'synset': 'printer_cable.n.01', 'name': 'printer_cable'}, {'id': 10100, 'synset': 'priory.n.01', 'name': 'priory'}, {'id': 10101, 'synset': 'prison.n.01', 'name': 'prison'}, {'id': 10102, 'synset': 'prison_camp.n.01', 'name': 'prison_camp'}, {'id': 10103, 'synset': 'privateer.n.02', 'name': 'privateer'}, {'id': 10104, 'synset': 'private_line.n.01', 'name': 'private_line'}, {'id': 10105, 'synset': 'privet_hedge.n.01', 'name': 'privet_hedge'}, {'id': 10106, 'synset': 'probe.n.02', 'name': 'probe'}, {'id': 10107, 'synset': 'proctoscope.n.01', 'name': 'proctoscope'}, {'id': 10108, 'synset': 'prod.n.02', 'name': 'prod'}, {'id': 10109, 'synset': 'production_line.n.01', 'name': 'production_line'}, {'id': 10110, 'synset': 'projector.n.01', 'name': 'projector'}, {'id': 10111, 'synset': 'prolonge.n.01', 'name': 'prolonge'}, {'id': 10112, 'synset': 'prolonge_knot.n.01', 'name': 'prolonge_knot'}, {'id': 10113, 'synset': 'prompter.n.02', 'name': 'prompter'}, {'id': 10114, 'synset': 'prong.n.01', 'name': 'prong'}, {'id': 10115, 'synset': 'propeller_plane.n.01', 'name': 'propeller_plane'}, {'id': 10116, 'synset': 'propjet.n.01', 'name': 'propjet'}, {'id': 10117, 'synset': 'proportional_counter_tube.n.01', 'name': 'proportional_counter_tube'}, {'id': 10118, 'synset': 'propulsion_system.n.01', 'name': 'propulsion_system'}, {'id': 10119, 'synset': 'proscenium.n.02', 'name': 'proscenium'}, {'id': 10120, 'synset': 'proscenium_arch.n.01', 'name': 'proscenium_arch'}, {'id': 10121, 'synset': 'prosthesis.n.01', 'name': 'prosthesis'}, 
{'id': 10122, 'synset': 'protective_covering.n.01', 'name': 'protective_covering'}, {'id': 10123, 'synset': 'protective_garment.n.01', 'name': 'protective_garment'}, {'id': 10124, 'synset': 'proton_accelerator.n.01', 'name': 'proton_accelerator'}, {'id': 10125, 'synset': 'protractor.n.01', 'name': 'protractor'}, {'id': 10126, 'synset': 'pruner.n.02', 'name': 'pruner'}, {'id': 10127, 'synset': 'pruning_knife.n.01', 'name': 'pruning_knife'}, {'id': 10128, 'synset': 'pruning_saw.n.01', 'name': 'pruning_saw'}, {'id': 10129, 'synset': 'pruning_shears.n.01', 'name': 'pruning_shears'}, {'id': 10130, 'synset': 'psaltery.n.01', 'name': 'psaltery'}, {'id': 10131, 'synset': 'psychrometer.n.01', 'name': 'psychrometer'}, {'id': 10132, 'synset': 'pt_boat.n.01', 'name': 'PT_boat'}, {'id': 10133, 'synset': 'public_address_system.n.01', 'name': 'public_address_system'}, {'id': 10134, 'synset': 'public_house.n.01', 'name': 'public_house'}, {'id': 10135, 'synset': 'public_toilet.n.01', 'name': 'public_toilet'}, {'id': 10136, 'synset': 'public_transport.n.01', 'name': 'public_transport'}, {'id': 10137, 'synset': 'public_works.n.01', 'name': 'public_works'}, {'id': 10138, 'synset': 'puck.n.02', 'name': 'puck'}, {'id': 10139, 'synset': 'pull.n.04', 'name': 'pull'}, {'id': 10140, 'synset': 'pullback.n.01', 'name': 'pullback'}, {'id': 10141, 'synset': 'pull_chain.n.01', 'name': 'pull_chain'}, {'id': 10142, 'synset': 'pulley.n.01', 'name': 'pulley'}, {'id': 10143, 'synset': 'pull-off.n.01', 'name': 'pull-off'}, {'id': 10144, 'synset': 'pullman.n.01', 'name': 'Pullman'}, {'id': 10145, 'synset': 'pullover.n.01', 'name': 'pullover'}, {'id': 10146, 'synset': 'pull-through.n.01', 'name': 'pull-through'}, {'id': 10147, 'synset': 'pulse_counter.n.01', 'name': 'pulse_counter'}, {'id': 10148, 'synset': 'pulse_generator.n.01', 'name': 'pulse_generator'}, {'id': 10149, 'synset': 'pulse_timing_circuit.n.01', 'name': 'pulse_timing_circuit'}, {'id': 10150, 'synset': 'pump.n.01', 'name': 'pump'}, {'id': 10151, 'synset': 'pump.n.03', 'name': 'pump'}, {'id': 10152, 'synset': 'pump_action.n.01', 'name': 'pump_action'}, {'id': 10153, 'synset': 'pump_house.n.01', 'name': 'pump_house'}, {'id': 10154, 'synset': 'pump_room.n.01', 'name': 'pump_room'}, {'id': 10155, 'synset': 'pump-type_pliers.n.01', 'name': 'pump-type_pliers'}, {'id': 10156, 'synset': 'pump_well.n.01', 'name': 'pump_well'}, {'id': 10157, 'synset': 'punchboard.n.01', 'name': 'punchboard'}, {'id': 10158, 'synset': 'punch_bowl.n.01', 'name': 'punch_bowl'}, {'id': 10159, 'synset': 'punching_bag.n.02', 'name': 'punching_bag'}, {'id': 10160, 'synset': 'punch_pliers.n.01', 'name': 'punch_pliers'}, {'id': 10161, 'synset': 'punch_press.n.01', 'name': 'punch_press'}, {'id': 10162, 'synset': 'punnet.n.01', 'name': 'punnet'}, {'id': 10163, 'synset': 'punt.n.02', 'name': 'punt'}, {'id': 10164, 'synset': 'pup_tent.n.01', 'name': 'pup_tent'}, {'id': 10165, 'synset': 'purdah.n.03', 'name': 'purdah'}, {'id': 10166, 'synset': 'purifier.n.01', 'name': 'purifier'}, {'id': 10167, 'synset': 'purl.n.02', 'name': 'purl'}, {'id': 10168, 'synset': 'purse.n.03', 'name': 'purse'}, {'id': 10169, 'synset': 'push-bike.n.01', 'name': 'push-bike'}, {'id': 10170, 'synset': 'push_broom.n.01', 'name': 'push_broom'}, {'id': 10171, 'synset': 'push_button.n.01', 'name': 'push_button'}, {'id': 10172, 'synset': 'push-button_radio.n.01', 'name': 'push-button_radio'}, {'id': 10173, 'synset': 'pusher.n.04', 'name': 'pusher'}, {'id': 10174, 'synset': 'put-put.n.01', 'name': 'put-put'}, {'id': 10175, 'synset': 
'puttee.n.01', 'name': 'puttee'}, {'id': 10176, 'synset': 'putter.n.02', 'name': 'putter'}, {'id': 10177, 'synset': 'putty_knife.n.01', 'name': 'putty_knife'}, {'id': 10178, 'synset': 'puzzle.n.02', 'name': 'puzzle'}, {'id': 10179, 'synset': 'pylon.n.02', 'name': 'pylon'}, {'id': 10180, 'synset': 'pylon.n.01', 'name': 'pylon'}, {'id': 10181, 'synset': 'pyramidal_tent.n.01', 'name': 'pyramidal_tent'}, {'id': 10182, 'synset': 'pyrograph.n.01', 'name': 'pyrograph'}, {'id': 10183, 'synset': 'pyrometer.n.01', 'name': 'pyrometer'}, {'id': 10184, 'synset': 'pyrometric_cone.n.01', 'name': 'pyrometric_cone'}, {'id': 10185, 'synset': 'pyrostat.n.01', 'name': 'pyrostat'}, {'id': 10186, 'synset': 'pyx.n.02', 'name': 'pyx'}, {'id': 10187, 'synset': 'pyx.n.01', 'name': 'pyx'}, {'id': 10188, 'synset': 'pyxis.n.03', 'name': 'pyxis'}, {'id': 10189, 'synset': 'quad.n.04', 'name': 'quad'}, {'id': 10190, 'synset': 'quadrant.n.04', 'name': 'quadrant'}, {'id': 10191, 'synset': 'quadraphony.n.01', 'name': 'quadraphony'}, {'id': 10192, 'synset': 'quartering.n.02', 'name': 'quartering'}, {'id': 10193, 'synset': 'quarterstaff.n.01', 'name': 'quarterstaff'}, {'id': 10194, 'synset': 'quartz_battery.n.01', 'name': 'quartz_battery'}, {'id': 10195, 'synset': 'quartz_lamp.n.01', 'name': 'quartz_lamp'}, {'id': 10196, 'synset': 'queen.n.08', 'name': 'queen'}, {'id': 10197, 'synset': 'queen.n.07', 'name': 'queen'}, {'id': 10198, 'synset': 'queen_post.n.01', 'name': 'queen_post'}, {'id': 10199, 'synset': 'quern.n.01', 'name': 'quern'}, {'id': 10200, 'synset': 'quill.n.01', 'name': 'quill'}, {'id': 10201, 'synset': 'quilted_bedspread.n.01', 'name': 'quilted_bedspread'}, {'id': 10202, 'synset': 'quilting.n.02', 'name': 'quilting'}, {'id': 10203, 'synset': 'quipu.n.01', 'name': 'quipu'}, {'id': 10204, 'synset': 'quirk_molding.n.01', 'name': 'quirk_molding'}, {'id': 10205, 'synset': 'quirt.n.01', 'name': 'quirt'}, {'id': 10206, 'synset': 'quiver.n.03', 'name': 'quiver'}, {'id': 10207, 'synset': 'quoin.n.02', 'name': 'quoin'}, {'id': 10208, 'synset': 'quoit.n.01', 'name': 'quoit'}, {'id': 10209, 'synset': 'qwerty_keyboard.n.01', 'name': 'QWERTY_keyboard'}, {'id': 10210, 'synset': 'rabbet.n.01', 'name': 'rabbet'}, {'id': 10211, 'synset': 'rabbet_joint.n.01', 'name': 'rabbet_joint'}, {'id': 10212, 'synset': 'rabbit_ears.n.01', 'name': 'rabbit_ears'}, {'id': 10213, 'synset': 'rabbit_hutch.n.01', 'name': 'rabbit_hutch'}, {'id': 10214, 'synset': 'raceabout.n.01', 'name': 'raceabout'}, {'id': 10215, 'synset': 'raceway.n.01', 'name': 'raceway'}, {'id': 10216, 'synset': 'racing_boat.n.01', 'name': 'racing_boat'}, {'id': 10217, 'synset': 'racing_gig.n.01', 'name': 'racing_gig'}, {'id': 10218, 'synset': 'racing_skiff.n.01', 'name': 'racing_skiff'}, {'id': 10219, 'synset': 'rack.n.05', 'name': 'rack'}, {'id': 10220, 'synset': 'rack.n.01', 'name': 'rack'}, {'id': 10221, 'synset': 'rack.n.04', 'name': 'rack'}, {'id': 10222, 'synset': 'rack_and_pinion.n.01', 'name': 'rack_and_pinion'}, {'id': 10223, 'synset': 'racquetball.n.01', 'name': 'racquetball'}, {'id': 10224, 'synset': 'radial.n.01', 'name': 'radial'}, {'id': 10225, 'synset': 'radial_engine.n.01', 'name': 'radial_engine'}, {'id': 10226, 'synset': 'radiation_pyrometer.n.01', 'name': 'radiation_pyrometer'}, {'id': 10227, 'synset': 'radiator.n.02', 'name': 'radiator'}, {'id': 10228, 'synset': 'radiator_cap.n.01', 'name': 'radiator_cap'}, {'id': 10229, 'synset': 'radiator_hose.n.01', 'name': 'radiator_hose'}, {'id': 10230, 'synset': 'radio.n.03', 'name': 'radio'}, {'id': 10231, 'synset': 
'radio_antenna.n.01', 'name': 'radio_antenna'}, {'id': 10232, 'synset': 'radio_chassis.n.01', 'name': 'radio_chassis'}, {'id': 10233, 'synset': 'radio_compass.n.01', 'name': 'radio_compass'}, {'id': 10234, 'synset': 'radiogram.n.02', 'name': 'radiogram'}, {'id': 10235, 'synset': 'radio_interferometer.n.01', 'name': 'radio_interferometer'}, {'id': 10236, 'synset': 'radio_link.n.01', 'name': 'radio_link'}, {'id': 10237, 'synset': 'radiometer.n.01', 'name': 'radiometer'}, {'id': 10238, 'synset': 'radiomicrometer.n.01', 'name': 'radiomicrometer'}, {'id': 10239, 'synset': 'radio-phonograph.n.01', 'name': 'radio-phonograph'}, {'id': 10240, 'synset': 'radiotelegraph.n.02', 'name': 'radiotelegraph'}, {'id': 10241, 'synset': 'radiotelephone.n.02', 'name': 'radiotelephone'}, {'id': 10242, 'synset': 'radio_telescope.n.01', 'name': 'radio_telescope'}, {'id': 10243, 'synset': 'radiotherapy_equipment.n.01', 'name': 'radiotherapy_equipment'}, {'id': 10244, 'synset': 'radio_transmitter.n.01', 'name': 'radio_transmitter'}, {'id': 10245, 'synset': 'radome.n.01', 'name': 'radome'}, {'id': 10246, 'synset': 'rafter.n.01', 'name': 'rafter'}, {'id': 10247, 'synset': 'raft_foundation.n.01', 'name': 'raft_foundation'}, {'id': 10248, 'synset': 'rag.n.01', 'name': 'rag'}, {'id': 10249, 'synset': 'ragbag.n.02', 'name': 'ragbag'}, {'id': 10250, 'synset': 'raglan.n.01', 'name': 'raglan'}, {'id': 10251, 'synset': 'raglan_sleeve.n.01', 'name': 'raglan_sleeve'}, {'id': 10252, 'synset': 'rail.n.04', 'name': 'rail'}, {'id': 10253, 'synset': 'rail_fence.n.01', 'name': 'rail_fence'}, {'id': 10254, 'synset': 'railhead.n.01', 'name': 'railhead'}, {'id': 10255, 'synset': 'railing.n.01', 'name': 'railing'}, {'id': 10256, 'synset': 'railing.n.02', 'name': 'railing'}, {'id': 10257, 'synset': 'railroad_bed.n.01', 'name': 'railroad_bed'}, {'id': 10258, 'synset': 'railroad_tunnel.n.01', 'name': 'railroad_tunnel'}, {'id': 10259, 'synset': 'rain_barrel.n.01', 'name': 'rain_barrel'}, {'id': 10260, 'synset': 'rain_gauge.n.01', 'name': 'rain_gauge'}, {'id': 10261, 'synset': 'rain_stick.n.01', 'name': 'rain_stick'}, {'id': 10262, 'synset': 'rake.n.03', 'name': 'rake'}, {'id': 10263, 'synset': 'rake_handle.n.01', 'name': 'rake_handle'}, {'id': 10264, 'synset': 'ram_disk.n.01', 'name': 'RAM_disk'}, {'id': 10265, 'synset': 'ramekin.n.02', 'name': 'ramekin'}, {'id': 10266, 'synset': 'ramjet.n.01', 'name': 'ramjet'}, {'id': 10267, 'synset': 'rammer.n.01', 'name': 'rammer'}, {'id': 10268, 'synset': 'ramp.n.01', 'name': 'ramp'}, {'id': 10269, 'synset': 'rampant_arch.n.01', 'name': 'rampant_arch'}, {'id': 10270, 'synset': 'rampart.n.01', 'name': 'rampart'}, {'id': 10271, 'synset': 'ramrod.n.01', 'name': 'ramrod'}, {'id': 10272, 'synset': 'ramrod.n.03', 'name': 'ramrod'}, {'id': 10273, 'synset': 'ranch.n.01', 'name': 'ranch'}, {'id': 10274, 'synset': 'ranch_house.n.01', 'name': 'ranch_house'}, {'id': 10275, 'synset': 'random-access_memory.n.01', 'name': 'random-access_memory'}, {'id': 10276, 'synset': 'rangefinder.n.01', 'name': 'rangefinder'}, {'id': 10277, 'synset': 'range_hood.n.01', 'name': 'range_hood'}, {'id': 10278, 'synset': 'range_pole.n.01', 'name': 'range_pole'}, {'id': 10279, 'synset': 'rapier.n.01', 'name': 'rapier'}, {'id': 10280, 'synset': 'rariora.n.01', 'name': 'rariora'}, {'id': 10281, 'synset': 'rasp.n.02', 'name': 'rasp'}, {'id': 10282, 'synset': 'ratchet.n.01', 'name': 'ratchet'}, {'id': 10283, 'synset': 'ratchet_wheel.n.01', 'name': 'ratchet_wheel'}, {'id': 10284, 'synset': 'rathskeller.n.01', 'name': 'rathskeller'}, {'id': 
10285, 'synset': 'ratline.n.01', 'name': 'ratline'}, {'id': 10286, 'synset': 'rat-tail_file.n.01', 'name': 'rat-tail_file'}, {'id': 10287, 'synset': 'rattan.n.03', 'name': 'rattan'}, {'id': 10288, 'synset': 'rattrap.n.03', 'name': 'rattrap'}, {'id': 10289, 'synset': 'rayon.n.01', 'name': 'rayon'}, {'id': 10290, 'synset': 'razor.n.01', 'name': 'razor'}, {'id': 10291, 'synset': 'reaction-propulsion_engine.n.01', 'name': 'reaction-propulsion_engine'}, {'id': 10292, 'synset': 'reaction_turbine.n.01', 'name': 'reaction_turbine'}, {'id': 10293, 'synset': 'reactor.n.01', 'name': 'reactor'}, {'id': 10294, 'synset': 'reading_lamp.n.01', 'name': 'reading_lamp'}, {'id': 10295, 'synset': 'reading_room.n.01', 'name': 'reading_room'}, {'id': 10296, 'synset': 'read-only_memory.n.01', 'name': 'read-only_memory'}, {'id': 10297, 'synset': 'read-only_memory_chip.n.01', 'name': 'read-only_memory_chip'}, {'id': 10298, 'synset': 'readout.n.03', 'name': 'readout'}, {'id': 10299, 'synset': 'read/write_head.n.01', 'name': 'read/write_head'}, {'id': 10300, 'synset': 'ready-to-wear.n.01', 'name': 'ready-to-wear'}, {'id': 10301, 'synset': 'real_storage.n.01', 'name': 'real_storage'}, {'id': 10302, 'synset': 'reamer.n.02', 'name': 'reamer'}, {'id': 10303, 'synset': 'reaumur_thermometer.n.01', 'name': 'Reaumur_thermometer'}, {'id': 10304, 'synset': 'rebozo.n.01', 'name': 'rebozo'}, {'id': 10305, 'synset': 'receiver.n.01', 'name': 'receiver'}, {'id': 10306, 'synset': 'receptacle.n.01', 'name': 'receptacle'}, {'id': 10307, 'synset': 'reception_desk.n.01', 'name': 'reception_desk'}, {'id': 10308, 'synset': 'reception_room.n.01', 'name': 'reception_room'}, {'id': 10309, 'synset': 'recess.n.04', 'name': 'recess'}, {'id': 10310, 'synset': 'reciprocating_engine.n.01', 'name': 'reciprocating_engine'}, {'id': 10311, 'synset': 'reconnaissance_plane.n.01', 'name': 'reconnaissance_plane'}, {'id': 10312, 'synset': 'reconnaissance_vehicle.n.01', 'name': 'reconnaissance_vehicle'}, {'id': 10313, 'synset': 'record_changer.n.01', 'name': 'record_changer'}, {'id': 10314, 'synset': 'recorder.n.01', 'name': 'recorder'}, {'id': 10315, 'synset': 'recording.n.03', 'name': 'recording'}, {'id': 10316, 'synset': 'recording_system.n.01', 'name': 'recording_system'}, {'id': 10317, 'synset': 'record_sleeve.n.01', 'name': 'record_sleeve'}, {'id': 10318, 'synset': 'recovery_room.n.01', 'name': 'recovery_room'}, {'id': 10319, 'synset': 'recreational_vehicle.n.01', 'name': 'recreational_vehicle'}, {'id': 10320, 'synset': 'recreation_room.n.01', 'name': 'recreation_room'}, {'id': 10321, 'synset': 'recycling_bin.n.01', 'name': 'recycling_bin'}, {'id': 10322, 'synset': 'recycling_plant.n.01', 'name': 'recycling_plant'}, {'id': 10323, 'synset': 'redbrick_university.n.01', 'name': 'redbrick_university'}, {'id': 10324, 'synset': 'red_carpet.n.01', 'name': 'red_carpet'}, {'id': 10325, 'synset': 'redoubt.n.02', 'name': 'redoubt'}, {'id': 10326, 'synset': 'redoubt.n.01', 'name': 'redoubt'}, {'id': 10327, 'synset': 'reduction_gear.n.01', 'name': 'reduction_gear'}, {'id': 10328, 'synset': 'reed_pipe.n.01', 'name': 'reed_pipe'}, {'id': 10329, 'synset': 'reed_stop.n.01', 'name': 'reed_stop'}, {'id': 10330, 'synset': 'reef_knot.n.01', 'name': 'reef_knot'}, {'id': 10331, 'synset': 'reel.n.03', 'name': 'reel'}, {'id': 10332, 'synset': 'reel.n.01', 'name': 'reel'}, {'id': 10333, 'synset': 'refectory.n.01', 'name': 'refectory'}, {'id': 10334, 'synset': 'refectory_table.n.01', 'name': 'refectory_table'}, {'id': 10335, 'synset': 'refinery.n.01', 'name': 'refinery'}, 
{'id': 10336, 'synset': 'reflecting_telescope.n.01', 'name': 'reflecting_telescope'}, {'id': 10337, 'synset': 'reflectometer.n.01', 'name': 'reflectometer'}, {'id': 10338, 'synset': 'reflex_camera.n.01', 'name': 'reflex_camera'}, {'id': 10339, 'synset': 'reflux_condenser.n.01', 'name': 'reflux_condenser'}, {'id': 10340, 'synset': 'reformatory.n.01', 'name': 'reformatory'}, {'id': 10341, 'synset': 'reformer.n.02', 'name': 'reformer'}, {'id': 10342, 'synset': 'refracting_telescope.n.01', 'name': 'refracting_telescope'}, {'id': 10343, 'synset': 'refractometer.n.01', 'name': 'refractometer'}, {'id': 10344, 'synset': 'refrigeration_system.n.01', 'name': 'refrigeration_system'}, {'id': 10345, 'synset': 'refrigerator.n.01', 'name': 'refrigerator'}, {'id': 10346, 'synset': 'refrigerator_car.n.01', 'name': 'refrigerator_car'}, {'id': 10347, 'synset': 'refuge.n.03', 'name': 'refuge'}, {'id': 10348, 'synset': 'regalia.n.01', 'name': 'regalia'}, {'id': 10349, 'synset': 'regimentals.n.01', 'name': 'regimentals'}, {'id': 10350, 'synset': 'regulator.n.01', 'name': 'regulator'}, {'id': 10351, 'synset': 'rein.n.01', 'name': 'rein'}, {'id': 10352, 'synset': 'relay.n.05', 'name': 'relay'}, {'id': 10353, 'synset': 'release.n.08', 'name': 'release'}, {'id': 10354, 'synset': 'religious_residence.n.01', 'name': 'religious_residence'}, {'id': 10355, 'synset': 'reliquary.n.01', 'name': 'reliquary'}, {'id': 10356, 'synset': 'remote_terminal.n.01', 'name': 'remote_terminal'}, {'id': 10357, 'synset': 'removable_disk.n.01', 'name': 'removable_disk'}, {'id': 10358, 'synset': 'rendering.n.05', 'name': 'rendering'}, {'id': 10359, 'synset': 'rep.n.02', 'name': 'rep'}, {'id': 10360, 'synset': 'repair_shop.n.01', 'name': 'repair_shop'}, {'id': 10361, 'synset': 'repeater.n.04', 'name': 'repeater'}, {'id': 10362, 'synset': 'repeating_firearm.n.01', 'name': 'repeating_firearm'}, {'id': 10363, 'synset': 'repository.n.03', 'name': 'repository'}, {'id': 10364, 'synset': 'reproducer.n.01', 'name': 'reproducer'}, {'id': 10365, 'synset': 'rerebrace.n.01', 'name': 'rerebrace'}, {'id': 10366, 'synset': 'rescue_equipment.n.01', 'name': 'rescue_equipment'}, {'id': 10367, 'synset': 'research_center.n.01', 'name': 'research_center'}, {'id': 10368, 'synset': 'reseau.n.02', 'name': 'reseau'}, {'id': 10369, 'synset': 'reservoir.n.03', 'name': 'reservoir'}, {'id': 10370, 'synset': 'reset.n.01', 'name': 'reset'}, {'id': 10371, 'synset': 'reset_button.n.01', 'name': 'reset_button'}, {'id': 10372, 'synset': 'residence.n.02', 'name': 'residence'}, {'id': 10373, 'synset': 'resistance_pyrometer.n.01', 'name': 'resistance_pyrometer'}, {'id': 10374, 'synset': 'resistor.n.01', 'name': 'resistor'}, {'id': 10375, 'synset': 'resonator.n.03', 'name': 'resonator'}, {'id': 10376, 'synset': 'resonator.n.01', 'name': 'resonator'}, {'id': 10377, 'synset': 'resort_hotel.n.02', 'name': 'resort_hotel'}, {'id': 10378, 'synset': 'respirator.n.01', 'name': 'respirator'}, {'id': 10379, 'synset': 'restaurant.n.01', 'name': 'restaurant'}, {'id': 10380, 'synset': 'rest_house.n.01', 'name': 'rest_house'}, {'id': 10381, 'synset': 'restraint.n.06', 'name': 'restraint'}, {'id': 10382, 'synset': 'resuscitator.n.01', 'name': 'resuscitator'}, {'id': 10383, 'synset': 'retainer.n.03', 'name': 'retainer'}, {'id': 10384, 'synset': 'retaining_wall.n.01', 'name': 'retaining_wall'}, {'id': 10385, 'synset': 'reticle.n.01', 'name': 'reticle'}, {'id': 10386, 'synset': 'reticulation.n.02', 'name': 'reticulation'}, {'id': 10387, 'synset': 'reticule.n.01', 'name': 'reticule'}, {'id': 
10388, 'synset': 'retort.n.02', 'name': 'retort'}, {'id': 10389, 'synset': 'retractor.n.01', 'name': 'retractor'}, {'id': 10390, 'synset': 'return_key.n.01', 'name': 'return_key'}, {'id': 10391, 'synset': 'reverberatory_furnace.n.01', 'name': 'reverberatory_furnace'}, {'id': 10392, 'synset': 'revers.n.01', 'name': 'revers'}, {'id': 10393, 'synset': 'reverse.n.02', 'name': 'reverse'}, {'id': 10394, 'synset': 'reversible.n.01', 'name': 'reversible'}, {'id': 10395, 'synset': 'revetment.n.02', 'name': 'revetment'}, {'id': 10396, 'synset': 'revetment.n.01', 'name': 'revetment'}, {'id': 10397, 'synset': 'revolver.n.01', 'name': 'revolver'}, {'id': 10398, 'synset': 'revolving_door.n.02', 'name': 'revolving_door'}, {'id': 10399, 'synset': 'rheometer.n.01', 'name': 'rheometer'}, {'id': 10400, 'synset': 'rheostat.n.01', 'name': 'rheostat'}, {'id': 10401, 'synset': 'rhinoscope.n.01', 'name': 'rhinoscope'}, {'id': 10402, 'synset': 'rib.n.01', 'name': 'rib'}, {'id': 10403, 'synset': 'riband.n.01', 'name': 'riband'}, {'id': 10404, 'synset': 'ribbed_vault.n.01', 'name': 'ribbed_vault'}, {'id': 10405, 'synset': 'ribbing.n.01', 'name': 'ribbing'}, {'id': 10406, 'synset': 'ribbon_development.n.01', 'name': 'ribbon_development'}, {'id': 10407, 'synset': 'rib_joint_pliers.n.01', 'name': 'rib_joint_pliers'}, {'id': 10408, 'synset': 'ricer.n.01', 'name': 'ricer'}, {'id': 10409, 'synset': 'riddle.n.02', 'name': 'riddle'}, {'id': 10410, 'synset': 'ride.n.02', 'name': 'ride'}, {'id': 10411, 'synset': 'ridge.n.06', 'name': 'ridge'}, {'id': 10412, 'synset': 'ridge_rope.n.01', 'name': 'ridge_rope'}, {'id': 10413, 'synset': 'riding_boot.n.01', 'name': 'riding_boot'}, {'id': 10414, 'synset': 'riding_crop.n.01', 'name': 'riding_crop'}, {'id': 10415, 'synset': 'riding_mower.n.01', 'name': 'riding_mower'}, {'id': 10416, 'synset': 'rifle_ball.n.01', 'name': 'rifle_ball'}, {'id': 10417, 'synset': 'rifle_grenade.n.01', 'name': 'rifle_grenade'}, {'id': 10418, 'synset': 'rig.n.01', 'name': 'rig'}, {'id': 10419, 'synset': 'rigger.n.02', 'name': 'rigger'}, {'id': 10420, 'synset': 'rigger.n.04', 'name': 'rigger'}, {'id': 10421, 'synset': 'rigging.n.01', 'name': 'rigging'}, {'id': 10422, 'synset': 'rigout.n.01', 'name': 'rigout'}, {'id': 10423, 'synset': 'ringlet.n.03', 'name': 'ringlet'}, {'id': 10424, 'synset': 'rings.n.01', 'name': 'rings'}, {'id': 10425, 'synset': 'rink.n.01', 'name': 'rink'}, {'id': 10426, 'synset': 'riot_gun.n.01', 'name': 'riot_gun'}, {'id': 10427, 'synset': 'ripcord.n.02', 'name': 'ripcord'}, {'id': 10428, 'synset': 'ripcord.n.01', 'name': 'ripcord'}, {'id': 10429, 'synset': 'ripping_bar.n.01', 'name': 'ripping_bar'}, {'id': 10430, 'synset': 'ripping_chisel.n.01', 'name': 'ripping_chisel'}, {'id': 10431, 'synset': 'ripsaw.n.01', 'name': 'ripsaw'}, {'id': 10432, 'synset': 'riser.n.03', 'name': 'riser'}, {'id': 10433, 'synset': 'riser.n.02', 'name': 'riser'}, {'id': 10434, 'synset': 'ritz.n.03', 'name': 'Ritz'}, {'id': 10435, 'synset': 'rivet.n.02', 'name': 'rivet'}, {'id': 10436, 'synset': 'riveting_machine.n.01', 'name': 'riveting_machine'}, {'id': 10437, 'synset': 'roach_clip.n.01', 'name': 'roach_clip'}, {'id': 10438, 'synset': 'road.n.01', 'name': 'road'}, {'id': 10439, 'synset': 'roadbed.n.01', 'name': 'roadbed'}, {'id': 10440, 'synset': 'roadblock.n.02', 'name': 'roadblock'}, {'id': 10441, 'synset': 'roadhouse.n.01', 'name': 'roadhouse'}, {'id': 10442, 'synset': 'roadster.n.01', 'name': 'roadster'}, {'id': 10443, 'synset': 'roadway.n.01', 'name': 'roadway'}, {'id': 10444, 'synset': 'roaster.n.04', 
'name': 'roaster'}, {'id': 10445, 'synset': 'robotics_equipment.n.01', 'name': 'robotics_equipment'}, {'id': 10446, 'synset': 'rochon_prism.n.01', 'name': 'Rochon_prism'}, {'id': 10447, 'synset': 'rock_bit.n.01', 'name': 'rock_bit'}, {'id': 10448, 'synset': 'rocker.n.07', 'name': 'rocker'}, {'id': 10449, 'synset': 'rocker.n.05', 'name': 'rocker'}, {'id': 10450, 'synset': 'rocker_arm.n.01', 'name': 'rocker_arm'}, {'id': 10451, 'synset': 'rocket.n.02', 'name': 'rocket'}, {'id': 10452, 'synset': 'rocket.n.01', 'name': 'rocket'}, {'id': 10453, 'synset': 'rod.n.01', 'name': 'rod'}, {'id': 10454, 'synset': 'rodeo.n.02', 'name': 'rodeo'}, {'id': 10455, 'synset': 'roll.n.04', 'name': 'roll'}, {'id': 10456, 'synset': 'roller.n.04', 'name': 'roller'}, {'id': 10457, 'synset': 'roller.n.03', 'name': 'roller'}, {'id': 10458, 'synset': 'roller_bandage.n.01', 'name': 'roller_bandage'}, {'id': 10459, 'synset': 'in-line_skate.n.01', 'name': 'in-line_skate'}, {'id': 10460, 'synset': 'roller_blind.n.01', 'name': 'roller_blind'}, {'id': 10461, 'synset': 'roller_coaster.n.02', 'name': 'roller_coaster'}, {'id': 10462, 'synset': 'roller_towel.n.01', 'name': 'roller_towel'}, {'id': 10463, 'synset': 'roll_film.n.01', 'name': 'roll_film'}, {'id': 10464, 'synset': 'rolling_hitch.n.01', 'name': 'rolling_hitch'}, {'id': 10465, 'synset': 'rolling_mill.n.01', 'name': 'rolling_mill'}, {'id': 10466, 'synset': 'rolling_stock.n.01', 'name': 'rolling_stock'}, {'id': 10467, 'synset': 'roll-on.n.02', 'name': 'roll-on'}, {'id': 10468, 'synset': 'roll-on.n.01', 'name': 'roll-on'}, {'id': 10469, 'synset': 'roll-on_roll-off.n.01', 'name': 'roll-on_roll-off'}, {'id': 10470, 'synset': 'rolodex.n.01', 'name': 'Rolodex'}, {'id': 10471, 'synset': 'roman_arch.n.01', 'name': 'Roman_arch'}, {'id': 10472, 'synset': 'roman_building.n.01', 'name': 'Roman_building'}, {'id': 10473, 'synset': 'romper.n.02', 'name': 'romper'}, {'id': 10474, 'synset': 'rood_screen.n.01', 'name': 'rood_screen'}, {'id': 10475, 'synset': 'roof.n.01', 'name': 'roof'}, {'id': 10476, 'synset': 'roof.n.02', 'name': 'roof'}, {'id': 10477, 'synset': 'roofing.n.01', 'name': 'roofing'}, {'id': 10478, 'synset': 'room.n.01', 'name': 'room'}, {'id': 10479, 'synset': 'roomette.n.01', 'name': 'roomette'}, {'id': 10480, 'synset': 'room_light.n.01', 'name': 'room_light'}, {'id': 10481, 'synset': 'roost.n.01', 'name': 'roost'}, {'id': 10482, 'synset': 'rope.n.01', 'name': 'rope'}, {'id': 10483, 'synset': 'rope_bridge.n.01', 'name': 'rope_bridge'}, {'id': 10484, 'synset': 'rope_tow.n.01', 'name': 'rope_tow'}, {'id': 10485, 'synset': 'rose_water.n.01', 'name': 'rose_water'}, {'id': 10486, 'synset': 'rose_window.n.01', 'name': 'rose_window'}, {'id': 10487, 'synset': 'rosin_bag.n.01', 'name': 'rosin_bag'}, {'id': 10488, 'synset': 'rotary_actuator.n.01', 'name': 'rotary_actuator'}, {'id': 10489, 'synset': 'rotary_engine.n.01', 'name': 'rotary_engine'}, {'id': 10490, 'synset': 'rotary_press.n.01', 'name': 'rotary_press'}, {'id': 10491, 'synset': 'rotating_mechanism.n.01', 'name': 'rotating_mechanism'}, {'id': 10492, 'synset': 'rotating_shaft.n.01', 'name': 'rotating_shaft'}, {'id': 10493, 'synset': 'rotisserie.n.02', 'name': 'rotisserie'}, {'id': 10494, 'synset': 'rotisserie.n.01', 'name': 'rotisserie'}, {'id': 10495, 'synset': 'rotor.n.03', 'name': 'rotor'}, {'id': 10496, 'synset': 'rotor.n.01', 'name': 'rotor'}, {'id': 10497, 'synset': 'rotor.n.02', 'name': 'rotor'}, {'id': 10498, 'synset': 'rotor_blade.n.01', 'name': 'rotor_blade'}, {'id': 10499, 'synset': 'rotor_head.n.01', 'name': 
'rotor_head'}, {'id': 10500, 'synset': 'rotunda.n.02', 'name': 'rotunda'}, {'id': 10501, 'synset': 'rotunda.n.01', 'name': 'rotunda'}, {'id': 10502, 'synset': 'rouge.n.01', 'name': 'rouge'}, {'id': 10503, 'synset': 'roughcast.n.02', 'name': 'roughcast'}, {'id': 10504, 'synset': 'rouleau.n.02', 'name': 'rouleau'}, {'id': 10505, 'synset': 'roulette.n.02', 'name': 'roulette'}, {'id': 10506, 'synset': 'roulette_ball.n.01', 'name': 'roulette_ball'}, {'id': 10507, 'synset': 'roulette_wheel.n.01', 'name': 'roulette_wheel'}, {'id': 10508, 'synset': 'round.n.01', 'name': 'round'}, {'id': 10509, 'synset': 'round_arch.n.01', 'name': 'round_arch'}, {'id': 10510, 'synset': 'round-bottom_flask.n.01', 'name': 'round-bottom_flask'}, {'id': 10511, 'synset': 'roundel.n.02', 'name': 'roundel'}, {'id': 10512, 'synset': 'round_file.n.01', 'name': 'round_file'}, {'id': 10513, 'synset': 'roundhouse.n.01', 'name': 'roundhouse'}, {'id': 10514, 'synset': 'router.n.03', 'name': 'router'}, {'id': 10515, 'synset': 'router_plane.n.01', 'name': 'router_plane'}, {'id': 10516, 'synset': 'rowel.n.01', 'name': 'rowel'}, {'id': 10517, 'synset': 'row_house.n.01', 'name': 'row_house'}, {'id': 10518, 'synset': 'rowing_boat.n.01', 'name': 'rowing_boat'}, {'id': 10519, 'synset': 'rowlock_arch.n.01', 'name': 'rowlock_arch'}, {'id': 10520, 'synset': 'royal.n.01', 'name': 'royal'}, {'id': 10521, 'synset': 'royal_mast.n.01', 'name': 'royal_mast'}, {'id': 10522, 'synset': 'rubber_boot.n.01', 'name': 'rubber_boot'}, {'id': 10523, 'synset': 'rubber_bullet.n.01', 'name': 'rubber_bullet'}, {'id': 10524, 'synset': 'rubber_eraser.n.01', 'name': 'rubber_eraser'}, {'id': 10525, 'synset': 'rudder.n.02', 'name': 'rudder'}, {'id': 10526, 'synset': 'rudder.n.01', 'name': 'rudder'}, {'id': 10527, 'synset': 'rudder_blade.n.01', 'name': 'rudder_blade'}, {'id': 10528, 'synset': 'rug.n.01', 'name': 'rug'}, {'id': 10529, 'synset': 'rugby_ball.n.01', 'name': 'rugby_ball'}, {'id': 10530, 'synset': 'ruin.n.02', 'name': 'ruin'}, {'id': 10531, 'synset': 'rule.n.12', 'name': 'rule'}, {'id': 10532, 'synset': 'rumble.n.02', 'name': 'rumble'}, {'id': 10533, 'synset': 'rumble_seat.n.01', 'name': 'rumble_seat'}, {'id': 10534, 'synset': 'rummer.n.01', 'name': 'rummer'}, {'id': 10535, 'synset': 'rumpus_room.n.01', 'name': 'rumpus_room'}, {'id': 10536, 'synset': 'runcible_spoon.n.01', 'name': 'runcible_spoon'}, {'id': 10537, 'synset': 'rundle.n.01', 'name': 'rundle'}, {'id': 10538, 'synset': 'running_shoe.n.01', 'name': 'running_shoe'}, {'id': 10539, 'synset': 'running_suit.n.01', 'name': 'running_suit'}, {'id': 10540, 'synset': 'runway.n.04', 'name': 'runway'}, {'id': 10541, 'synset': 'rushlight.n.01', 'name': 'rushlight'}, {'id': 10542, 'synset': 'russet.n.01', 'name': 'russet'}, {'id': 10543, 'synset': 'rya.n.01', 'name': 'rya'}, {'id': 10544, 'synset': 'saber.n.01', 'name': 'saber'}, {'id': 10545, 'synset': 'saber_saw.n.01', 'name': 'saber_saw'}, {'id': 10546, 'synset': 'sable.n.04', 'name': 'sable'}, {'id': 10547, 'synset': 'sable.n.01', 'name': 'sable'}, {'id': 10548, 'synset': 'sable_coat.n.01', 'name': 'sable_coat'}, {'id': 10549, 'synset': 'sabot.n.01', 'name': 'sabot'}, {'id': 10550, 'synset': 'sachet.n.01', 'name': 'sachet'}, {'id': 10551, 'synset': 'sack.n.05', 'name': 'sack'}, {'id': 10552, 'synset': 'sackbut.n.01', 'name': 'sackbut'}, {'id': 10553, 'synset': 'sackcloth.n.02', 'name': 'sackcloth'}, {'id': 10554, 'synset': 'sackcloth.n.01', 'name': 'sackcloth'}, {'id': 10555, 'synset': 'sack_coat.n.01', 'name': 'sack_coat'}, {'id': 10556, 'synset': 
'sacking.n.01', 'name': 'sacking'}, {'id': 10557, 'synset': 'saddle_oxford.n.01', 'name': 'saddle_oxford'}, {'id': 10558, 'synset': 'saddlery.n.02', 'name': 'saddlery'}, {'id': 10559, 'synset': 'saddle_seat.n.01', 'name': 'saddle_seat'}, {'id': 10560, 'synset': 'saddle_stitch.n.01', 'name': 'saddle_stitch'}, {'id': 10561, 'synset': 'safe.n.01', 'name': 'safe'}, {'id': 10562, 'synset': 'safe.n.02', 'name': 'safe'}, {'id': 10563, 'synset': 'safe-deposit.n.01', 'name': 'safe-deposit'}, {'id': 10564, 'synset': 'safe_house.n.01', 'name': 'safe_house'}, {'id': 10565, 'synset': 'safety_arch.n.01', 'name': 'safety_arch'}, {'id': 10566, 'synset': 'safety_belt.n.01', 'name': 'safety_belt'}, {'id': 10567, 'synset': 'safety_bicycle.n.01', 'name': 'safety_bicycle'}, {'id': 10568, 'synset': 'safety_bolt.n.01', 'name': 'safety_bolt'}, {'id': 10569, 'synset': 'safety_curtain.n.01', 'name': 'safety_curtain'}, {'id': 10570, 'synset': 'safety_fuse.n.01', 'name': 'safety_fuse'}, {'id': 10571, 'synset': 'safety_lamp.n.01', 'name': 'safety_lamp'}, {'id': 10572, 'synset': 'safety_match.n.01', 'name': 'safety_match'}, {'id': 10573, 'synset': 'safety_net.n.02', 'name': 'safety_net'}, {'id': 10574, 'synset': 'safety_rail.n.01', 'name': 'safety_rail'}, {'id': 10575, 'synset': 'safety_razor.n.01', 'name': 'safety_razor'}, {'id': 10576, 'synset': 'safety_valve.n.01', 'name': 'safety_valve'}, {'id': 10577, 'synset': 'sail.n.03', 'name': 'sail'}, {'id': 10578, 'synset': 'sailboat.n.01', 'name': 'sailboat'}, {'id': 10579, 'synset': 'sailcloth.n.01', 'name': 'sailcloth'}, {'id': 10580, 'synset': 'sailing_vessel.n.01', 'name': 'sailing_vessel'}, {'id': 10581, 'synset': 'sailing_warship.n.01', 'name': 'sailing_warship'}, {'id': 10582, 'synset': 'sailor_cap.n.01', 'name': 'sailor_cap'}, {'id': 10583, 'synset': 'sailor_suit.n.01', 'name': 'sailor_suit'}, {'id': 10584, 'synset': 'salad_bar.n.01', 'name': 'salad_bar'}, {'id': 10585, 'synset': 'salad_bowl.n.02', 'name': 'salad_bowl'}, {'id': 10586, 'synset': 'salinometer.n.01', 'name': 'salinometer'}, {'id': 10587, 'synset': 'sallet.n.01', 'name': 'sallet'}, {'id': 10588, 'synset': 'salon.n.03', 'name': 'salon'}, {'id': 10589, 'synset': 'salon.n.01', 'name': 'salon'}, {'id': 10590, 'synset': 'salon.n.02', 'name': 'salon'}, {'id': 10591, 'synset': 'saltbox.n.01', 'name': 'saltbox'}, {'id': 10592, 'synset': 'saltcellar.n.01', 'name': 'saltcellar'}, {'id': 10593, 'synset': 'saltworks.n.01', 'name': 'saltworks'}, {'id': 10594, 'synset': 'salver.n.01', 'name': 'salver'}, {'id': 10595, 'synset': 'salwar.n.01', 'name': 'salwar'}, {'id': 10596, 'synset': 'sam_browne_belt.n.01', 'name': 'Sam_Browne_belt'}, {'id': 10597, 'synset': 'samisen.n.01', 'name': 'samisen'}, {'id': 10598, 'synset': 'samite.n.01', 'name': 'samite'}, {'id': 10599, 'synset': 'samovar.n.01', 'name': 'samovar'}, {'id': 10600, 'synset': 'sampan.n.01', 'name': 'sampan'}, {'id': 10601, 'synset': 'sandbag.n.01', 'name': 'sandbag'}, {'id': 10602, 'synset': 'sandblaster.n.01', 'name': 'sandblaster'}, {'id': 10603, 'synset': 'sandbox.n.01', 'name': 'sandbox'}, {'id': 10604, 'synset': 'sandglass.n.01', 'name': 'sandglass'}, {'id': 10605, 'synset': 'sand_wedge.n.01', 'name': 'sand_wedge'}, {'id': 10606, 'synset': 'sandwich_board.n.01', 'name': 'sandwich_board'}, {'id': 10607, 'synset': 'sanitary_napkin.n.01', 'name': 'sanitary_napkin'}, {'id': 10608, 'synset': 'cling_film.n.01', 'name': 'cling_film'}, {'id': 10609, 'synset': 'sarcenet.n.01', 'name': 'sarcenet'}, {'id': 10610, 'synset': 'sarcophagus.n.01', 'name': 
'sarcophagus'}, {'id': 10611, 'synset': 'sari.n.01', 'name': 'sari'}, {'id': 10612, 'synset': 'sarong.n.01', 'name': 'sarong'}, {'id': 10613, 'synset': 'sash.n.01', 'name': 'sash'}, {'id': 10614, 'synset': 'sash_fastener.n.01', 'name': 'sash_fastener'}, {'id': 10615, 'synset': 'sash_window.n.01', 'name': 'sash_window'}, {'id': 10616, 'synset': 'sateen.n.01', 'name': 'sateen'}, {'id': 10617, 'synset': 'satellite.n.01', 'name': 'satellite'}, {'id': 10618, 'synset': 'satellite_receiver.n.01', 'name': 'satellite_receiver'}, {'id': 10619, 'synset': 'satellite_television.n.01', 'name': 'satellite_television'}, {'id': 10620, 'synset': 'satellite_transmitter.n.01', 'name': 'satellite_transmitter'}, {'id': 10621, 'synset': 'satin.n.01', 'name': 'satin'}, {'id': 10622, 'synset': 'saturday_night_special.n.01', 'name': 'Saturday_night_special'}, {'id': 10623, 'synset': 'saucepot.n.01', 'name': 'saucepot'}, {'id': 10624, 'synset': 'sauna.n.01', 'name': 'sauna'}, {'id': 10625, 'synset': 'savings_bank.n.02', 'name': 'savings_bank'}, {'id': 10626, 'synset': 'saw.n.02', 'name': 'saw'}, {'id': 10627, 'synset': 'sawed-off_shotgun.n.01', 'name': 'sawed-off_shotgun'}, {'id': 10628, 'synset': 'sawmill.n.01', 'name': 'sawmill'}, {'id': 10629, 'synset': 'saw_set.n.01', 'name': 'saw_set'}, {'id': 10630, 'synset': 'saxhorn.n.01', 'name': 'saxhorn'}, {'id': 10631, 'synset': 'scabbard.n.01', 'name': 'scabbard'}, {'id': 10632, 'synset': 'scaffolding.n.01', 'name': 'scaffolding'}, {'id': 10633, 'synset': 'scale.n.08', 'name': 'scale'}, {'id': 10634, 'synset': 'scaler.n.01', 'name': 'scaler'}, {'id': 10635, 'synset': 'scaling_ladder.n.01', 'name': 'scaling_ladder'}, {'id': 10636, 'synset': 'scalpel.n.01', 'name': 'scalpel'}, {'id': 10637, 'synset': 'scanner.n.04', 'name': 'scanner'}, {'id': 10638, 'synset': 'scanner.n.03', 'name': 'scanner'}, {'id': 10639, 'synset': 'scanner.n.02', 'name': 'scanner'}, {'id': 10640, 'synset': 'scantling.n.01', 'name': 'scantling'}, {'id': 10641, 'synset': 'scarf_joint.n.01', 'name': 'scarf_joint'}, {'id': 10642, 'synset': 'scatter_rug.n.01', 'name': 'scatter_rug'}, {'id': 10643, 'synset': 'scauper.n.01', 'name': 'scauper'}, {'id': 10644, 'synset': 'schmidt_telescope.n.01', 'name': 'Schmidt_telescope'}, {'id': 10645, 'synset': 'school.n.02', 'name': 'school'}, {'id': 10646, 'synset': 'schoolbag.n.01', 'name': 'schoolbag'}, {'id': 10647, 'synset': 'school_bell.n.01', 'name': 'school_bell'}, {'id': 10648, 'synset': 'school_ship.n.01', 'name': 'school_ship'}, {'id': 10649, 'synset': 'school_system.n.01', 'name': 'school_system'}, {'id': 10650, 'synset': 'schooner.n.02', 'name': 'schooner'}, {'id': 10651, 'synset': 'schooner.n.01', 'name': 'schooner'}, {'id': 10652, 'synset': 'scientific_instrument.n.01', 'name': 'scientific_instrument'}, {'id': 10653, 'synset': 'scimitar.n.01', 'name': 'scimitar'}, {'id': 10654, 'synset': 'scintillation_counter.n.01', 'name': 'scintillation_counter'}, {'id': 10655, 'synset': 'sclerometer.n.01', 'name': 'sclerometer'}, {'id': 10656, 'synset': 'scoinson_arch.n.01', 'name': 'scoinson_arch'}, {'id': 10657, 'synset': 'sconce.n.04', 'name': 'sconce'}, {'id': 10658, 'synset': 'sconce.n.03', 'name': 'sconce'}, {'id': 10659, 'synset': 'scoop.n.06', 'name': 'scoop'}, {'id': 10660, 'synset': 'scooter.n.02', 'name': 'scooter'}, {'id': 10661, 'synset': 'scouring_pad.n.01', 'name': 'scouring_pad'}, {'id': 10662, 'synset': 'scow.n.02', 'name': 'scow'}, {'id': 10663, 'synset': 'scow.n.01', 'name': 'scow'}, {'id': 10664, 'synset': 'scratcher.n.03', 'name': 'scratcher'}, 
{'id': 10665, 'synset': 'screen.n.05', 'name': 'screen'}, {'id': 10666, 'synset': 'screen.n.04', 'name': 'screen'}, {'id': 10667, 'synset': 'screen.n.09', 'name': 'screen'}, {'id': 10668, 'synset': 'screen.n.03', 'name': 'screen'}, {'id': 10669, 'synset': 'screen_door.n.01', 'name': 'screen_door'}, {'id': 10670, 'synset': 'screening.n.02', 'name': 'screening'}, {'id': 10671, 'synset': 'screw.n.04', 'name': 'screw'}, {'id': 10672, 'synset': 'screw.n.03', 'name': 'screw'}, {'id': 10673, 'synset': 'screw.n.02', 'name': 'screw'}, {'id': 10674, 'synset': 'screw_eye.n.01', 'name': 'screw_eye'}, {'id': 10675, 'synset': 'screw_key.n.01', 'name': 'screw_key'}, {'id': 10676, 'synset': 'screw_thread.n.01', 'name': 'screw_thread'}, {'id': 10677, 'synset': 'screwtop.n.01', 'name': 'screwtop'}, {'id': 10678, 'synset': 'screw_wrench.n.01', 'name': 'screw_wrench'}, {'id': 10679, 'synset': 'scriber.n.01', 'name': 'scriber'}, {'id': 10680, 'synset': 'scrim.n.01', 'name': 'scrim'}, {'id': 10681, 'synset': 'scrimshaw.n.01', 'name': 'scrimshaw'}, {'id': 10682, 'synset': 'scriptorium.n.01', 'name': 'scriptorium'}, {'id': 10683, 'synset': 'scrubber.n.03', 'name': 'scrubber'}, {'id': 10684, 'synset': 'scrub_plane.n.01', 'name': 'scrub_plane'}, {'id': 10685, 'synset': 'scuffer.n.01', 'name': 'scuffer'}, {'id': 10686, 'synset': 'scuffle.n.02', 'name': 'scuffle'}, {'id': 10687, 'synset': 'scull.n.02', 'name': 'scull'}, {'id': 10688, 'synset': 'scull.n.01', 'name': 'scull'}, {'id': 10689, 'synset': 'scullery.n.01', 'name': 'scullery'}, {'id': 10690, 'synset': 'scuttle.n.01', 'name': 'scuttle'}, {'id': 10691, 'synset': 'scyphus.n.01', 'name': 'scyphus'}, {'id': 10692, 'synset': 'scythe.n.01', 'name': 'scythe'}, {'id': 10693, 'synset': 'seabag.n.01', 'name': 'seabag'}, {'id': 10694, 'synset': 'sea_boat.n.01', 'name': 'sea_boat'}, {'id': 10695, 'synset': 'sea_chest.n.01', 'name': 'sea_chest'}, {'id': 10696, 'synset': 'sealing_wax.n.01', 'name': 'sealing_wax'}, {'id': 10697, 'synset': 'sealskin.n.02', 'name': 'sealskin'}, {'id': 10698, 'synset': 'seam.n.01', 'name': 'seam'}, {'id': 10699, 'synset': 'searchlight.n.01', 'name': 'searchlight'}, {'id': 10700, 'synset': 'searing_iron.n.01', 'name': 'searing_iron'}, {'id': 10701, 'synset': 'seat.n.04', 'name': 'seat'}, {'id': 10702, 'synset': 'seat.n.03', 'name': 'seat'}, {'id': 10703, 'synset': 'seat.n.09', 'name': 'seat'}, {'id': 10704, 'synset': 'seat_belt.n.01', 'name': 'seat_belt'}, {'id': 10705, 'synset': 'secateurs.n.01', 'name': 'secateurs'}, {'id': 10706, 'synset': 'secondary_coil.n.01', 'name': 'secondary_coil'}, {'id': 10707, 'synset': 'second_balcony.n.01', 'name': 'second_balcony'}, {'id': 10708, 'synset': 'second_base.n.01', 'name': 'second_base'}, {'id': 10709, 'synset': 'second_hand.n.02', 'name': 'second_hand'}, {'id': 10710, 'synset': 'secretary.n.04', 'name': 'secretary'}, {'id': 10711, 'synset': 'sectional.n.01', 'name': 'sectional'}, {'id': 10712, 'synset': 'security_blanket.n.02', 'name': 'security_blanket'}, {'id': 10713, 'synset': 'security_system.n.02', 'name': 'security_system'}, {'id': 10714, 'synset': 'security_system.n.01', 'name': 'security_system'}, {'id': 10715, 'synset': 'sedan.n.01', 'name': 'sedan'}, {'id': 10716, 'synset': 'sedan.n.02', 'name': 'sedan'}, {'id': 10717, 'synset': 'seeder.n.02', 'name': 'seeder'}, {'id': 10718, 'synset': 'seeker.n.02', 'name': 'seeker'}, {'id': 10719, 'synset': 'seersucker.n.01', 'name': 'seersucker'}, {'id': 10720, 'synset': 'segmental_arch.n.01', 'name': 'segmental_arch'}, {'id': 10721, 'synset': 
'segway.n.01', 'name': 'Segway'}, {'id': 10722, 'synset': 'seidel.n.01', 'name': 'seidel'}, {'id': 10723, 'synset': 'seine.n.02', 'name': 'seine'}, {'id': 10724, 'synset': 'seismograph.n.01', 'name': 'seismograph'}, {'id': 10725, 'synset': 'selector.n.02', 'name': 'selector'}, {'id': 10726, 'synset': 'selenium_cell.n.01', 'name': 'selenium_cell'}, {'id': 10727, 'synset': 'self-propelled_vehicle.n.01', 'name': 'self-propelled_vehicle'}, {'id': 10728, 'synset': 'self-registering_thermometer.n.01', 'name': 'self-registering_thermometer'}, {'id': 10729, 'synset': 'self-starter.n.02', 'name': 'self-starter'}, {'id': 10730, 'synset': 'selsyn.n.01', 'name': 'selsyn'}, {'id': 10731, 'synset': 'selvage.n.02', 'name': 'selvage'}, {'id': 10732, 'synset': 'semaphore.n.01', 'name': 'semaphore'}, {'id': 10733, 'synset': 'semiautomatic_firearm.n.01', 'name': 'semiautomatic_firearm'}, {'id': 10734, 'synset': 'semiautomatic_pistol.n.01', 'name': 'semiautomatic_pistol'}, {'id': 10735, 'synset': 'semiconductor_device.n.01', 'name': 'semiconductor_device'}, {'id': 10736, 'synset': 'semi-detached_house.n.01', 'name': 'semi-detached_house'}, {'id': 10737, 'synset': 'semigloss.n.01', 'name': 'semigloss'}, {'id': 10738, 'synset': 'semitrailer.n.01', 'name': 'semitrailer'}, {'id': 10739, 'synset': 'sennit.n.01', 'name': 'sennit'}, {'id': 10740, 'synset': 'sensitometer.n.01', 'name': 'sensitometer'}, {'id': 10741, 'synset': 'sentry_box.n.01', 'name': 'sentry_box'}, {'id': 10742, 'synset': 'separate.n.02', 'name': 'separate'}, {'id': 10743, 'synset': 'septic_tank.n.01', 'name': 'septic_tank'}, {'id': 10744, 'synset': 'sequence.n.03', 'name': 'sequence'}, {'id': 10745, 'synset': 'sequencer.n.01', 'name': 'sequencer'}, {'id': 10746, 'synset': 'serape.n.01', 'name': 'serape'}, {'id': 10747, 'synset': 'serge.n.01', 'name': 'serge'}, {'id': 10748, 'synset': 'serger.n.01', 'name': 'serger'}, {'id': 10749, 'synset': 'serial_port.n.01', 'name': 'serial_port'}, {'id': 10750, 'synset': 'serpent.n.03', 'name': 'serpent'}, {'id': 10751, 'synset': 'serration.n.03', 'name': 'serration'}, {'id': 10752, 'synset': 'server.n.04', 'name': 'server'}, {'id': 10753, 'synset': 'server.n.03', 'name': 'server'}, {'id': 10754, 'synset': 'service_club.n.02', 'name': 'service_club'}, {'id': 10755, 'synset': 'serving_cart.n.01', 'name': 'serving_cart'}, {'id': 10756, 'synset': 'serving_dish.n.01', 'name': 'serving_dish'}, {'id': 10757, 'synset': 'servo.n.01', 'name': 'servo'}, {'id': 10758, 'synset': 'set.n.13', 'name': 'set'}, {'id': 10759, 'synset': 'set_gun.n.01', 'name': 'set_gun'}, {'id': 10760, 'synset': 'setscrew.n.02', 'name': 'setscrew'}, {'id': 10761, 'synset': 'setscrew.n.01', 'name': 'setscrew'}, {'id': 10762, 'synset': 'set_square.n.01', 'name': 'set_square'}, {'id': 10763, 'synset': 'settee.n.02', 'name': 'settee'}, {'id': 10764, 'synset': 'settle.n.01', 'name': 'settle'}, {'id': 10765, 'synset': 'settlement_house.n.01', 'name': 'settlement_house'}, {'id': 10766, 'synset': 'seventy-eight.n.02', 'name': 'seventy-eight'}, {'id': 10767, 'synset': 'seven_wonders_of_the_ancient_world.n.01', 'name': 'Seven_Wonders_of_the_Ancient_World'}, {'id': 10768, 'synset': 'sewage_disposal_plant.n.01', 'name': 'sewage_disposal_plant'}, {'id': 10769, 'synset': 'sewer.n.01', 'name': 'sewer'}, {'id': 10770, 'synset': 'sewing_basket.n.01', 'name': 'sewing_basket'}, {'id': 10771, 'synset': 'sewing_kit.n.01', 'name': 'sewing_kit'}, {'id': 10772, 'synset': 'sewing_needle.n.01', 'name': 'sewing_needle'}, {'id': 10773, 'synset': 'sewing_room.n.01', 'name': 
'sewing_room'}, {'id': 10774, 'synset': 'sextant.n.02', 'name': 'sextant'}, {'id': 10775, 'synset': 'sgraffito.n.01', 'name': 'sgraffito'}, {'id': 10776, 'synset': 'shackle.n.01', 'name': 'shackle'}, {'id': 10777, 'synset': 'shackle.n.02', 'name': 'shackle'}, {'id': 10778, 'synset': 'shade.n.03', 'name': 'shade'}, {'id': 10779, 'synset': 'shadow_box.n.01', 'name': 'shadow_box'}, {'id': 10780, 'synset': 'shaft.n.03', 'name': 'shaft'}, {'id': 10781, 'synset': 'shag_rug.n.01', 'name': 'shag_rug'}, {'id': 10782, 'synset': 'shank.n.04', 'name': 'shank'}, {'id': 10783, 'synset': 'shank.n.03', 'name': 'shank'}, {'id': 10784, 'synset': 'shantung.n.01', 'name': 'shantung'}, {'id': 10785, 'synset': 'shaper.n.02', 'name': 'shaper'}, {'id': 10786, 'synset': 'shaping_tool.n.01', 'name': 'shaping_tool'}, {'id': 10787, 'synset': 'sharkskin.n.01', 'name': 'sharkskin'}, {'id': 10788, 'synset': 'shaving_brush.n.01', 'name': 'shaving_brush'}, {'id': 10789, 'synset': 'shaving_foam.n.01', 'name': 'shaving_foam'}, {'id': 10790, 'synset': 'shawm.n.01', 'name': 'shawm'}, {'id': 10791, 'synset': 'sheath.n.01', 'name': 'sheath'}, {'id': 10792, 'synset': 'sheathing.n.01', 'name': 'sheathing'}, {'id': 10793, 'synset': 'shed.n.01', 'name': 'shed'}, {'id': 10794, 'synset': 'sheep_bell.n.01', 'name': 'sheep_bell'}, {'id': 10795, 'synset': 'sheepshank.n.01', 'name': 'sheepshank'}, {'id': 10796, 'synset': 'sheepskin_coat.n.01', 'name': 'sheepskin_coat'}, {'id': 10797, 'synset': 'sheepwalk.n.01', 'name': 'sheepwalk'}, {'id': 10798, 'synset': 'sheet.n.03', 'name': 'sheet'}, {'id': 10799, 'synset': 'sheet_bend.n.01', 'name': 'sheet_bend'}, {'id': 10800, 'synset': 'sheeting.n.01', 'name': 'sheeting'}, {'id': 10801, 'synset': 'sheet_pile.n.01', 'name': 'sheet_pile'}, {'id': 10802, 'synset': 'sheetrock.n.01', 'name': 'Sheetrock'}, {'id': 10803, 'synset': 'shelf.n.01', 'name': 'shelf'}, {'id': 10804, 'synset': 'shelf_bracket.n.01', 'name': 'shelf_bracket'}, {'id': 10805, 'synset': 'shell.n.01', 'name': 'shell'}, {'id': 10806, 'synset': 'shell.n.08', 'name': 'shell'}, {'id': 10807, 'synset': 'shell.n.07', 'name': 'shell'}, {'id': 10808, 'synset': 'shellac.n.02', 'name': 'shellac'}, {'id': 10809, 'synset': 'shelter.n.01', 'name': 'shelter'}, {'id': 10810, 'synset': 'shelter.n.02', 'name': 'shelter'}, {'id': 10811, 'synset': 'shelter.n.05', 'name': 'shelter'}, {'id': 10812, 'synset': 'sheltered_workshop.n.01', 'name': 'sheltered_workshop'}, {'id': 10813, 'synset': 'sheraton.n.01', 'name': 'Sheraton'}, {'id': 10814, 'synset': 'shield.n.01', 'name': 'shield'}, {'id': 10815, 'synset': 'shielding.n.03', 'name': 'shielding'}, {'id': 10816, 'synset': 'shift_key.n.01', 'name': 'shift_key'}, {'id': 10817, 'synset': 'shillelagh.n.01', 'name': 'shillelagh'}, {'id': 10818, 'synset': 'shim.n.01', 'name': 'shim'}, {'id': 10819, 'synset': 'shingle.n.03', 'name': 'shingle'}, {'id': 10820, 'synset': 'shin_guard.n.01', 'name': 'shin_guard'}, {'id': 10821, 'synset': 'ship.n.01', 'name': 'ship'}, {'id': 10822, 'synset': 'shipboard_system.n.01', 'name': 'shipboard_system'}, {'id': 10823, 'synset': 'shipping.n.02', 'name': 'shipping'}, {'id': 10824, 'synset': 'shipping_room.n.01', 'name': 'shipping_room'}, {'id': 10825, 'synset': 'ship-towed_long-range_acoustic_detection_system.n.01', 'name': 'ship-towed_long-range_acoustic_detection_system'}, {'id': 10826, 'synset': 'shipwreck.n.01', 'name': 'shipwreck'}, {'id': 10827, 'synset': 'shirt_button.n.01', 'name': 'shirt_button'}, {'id': 10828, 'synset': 'shirtdress.n.01', 'name': 'shirtdress'}, {'id': 
10829, 'synset': 'shirtfront.n.01', 'name': 'shirtfront'}, {'id': 10830, 'synset': 'shirting.n.01', 'name': 'shirting'}, {'id': 10831, 'synset': 'shirtsleeve.n.01', 'name': 'shirtsleeve'}, {'id': 10832, 'synset': 'shirttail.n.02', 'name': 'shirttail'}, {'id': 10833, 'synset': 'shirtwaist.n.01', 'name': 'shirtwaist'}, {'id': 10834, 'synset': 'shiv.n.01', 'name': 'shiv'}, {'id': 10835, 'synset': 'shock_absorber.n.01', 'name': 'shock_absorber'}, {'id': 10836, 'synset': 'shoe.n.02', 'name': 'shoe'}, {'id': 10837, 'synset': 'shoebox.n.02', 'name': 'shoebox'}, {'id': 10838, 'synset': 'shoehorn.n.01', 'name': 'shoehorn'}, {'id': 10839, 'synset': 'shoe_shop.n.01', 'name': 'shoe_shop'}, {'id': 10840, 'synset': 'shoetree.n.01', 'name': 'shoetree'}, {'id': 10841, 'synset': 'shofar.n.01', 'name': 'shofar'}, {'id': 10842, 'synset': 'shoji.n.01', 'name': 'shoji'}, {'id': 10843, 'synset': 'shooting_brake.n.01', 'name': 'shooting_brake'}, {'id': 10844, 'synset': 'shooting_lodge.n.01', 'name': 'shooting_lodge'}, {'id': 10845, 'synset': 'shooting_stick.n.01', 'name': 'shooting_stick'}, {'id': 10846, 'synset': 'shop.n.01', 'name': 'shop'}, {'id': 10847, 'synset': 'shop_bell.n.01', 'name': 'shop_bell'}, {'id': 10848, 'synset': 'shopping_basket.n.01', 'name': 'shopping_basket'}, {'id': 10849, 'synset': 'short_circuit.n.01', 'name': 'short_circuit'}, {'id': 10850, 'synset': 'short_iron.n.01', 'name': 'short_iron'}, {'id': 10851, 'synset': 'short_sleeve.n.01', 'name': 'short_sleeve'}, {'id': 10852, 'synset': 'shortwave_diathermy_machine.n.01', 'name': 'shortwave_diathermy_machine'}, {'id': 10853, 'synset': 'shot.n.12', 'name': 'shot'}, {'id': 10854, 'synset': 'shotgun.n.01', 'name': 'shotgun'}, {'id': 10855, 'synset': 'shotgun_shell.n.01', 'name': 'shotgun_shell'}, {'id': 10856, 'synset': 'shot_tower.n.01', 'name': 'shot_tower'}, {'id': 10857, 'synset': 'shoulder.n.04', 'name': 'shoulder'}, {'id': 10858, 'synset': 'shouldered_arch.n.01', 'name': 'shouldered_arch'}, {'id': 10859, 'synset': 'shoulder_holster.n.01', 'name': 'shoulder_holster'}, {'id': 10860, 'synset': 'shoulder_pad.n.01', 'name': 'shoulder_pad'}, {'id': 10861, 'synset': 'shoulder_patch.n.01', 'name': 'shoulder_patch'}, {'id': 10862, 'synset': 'shovel.n.03', 'name': 'shovel'}, {'id': 10863, 'synset': 'shovel_hat.n.01', 'name': 'shovel_hat'}, {'id': 10864, 'synset': 'showboat.n.01', 'name': 'showboat'}, {'id': 10865, 'synset': 'shower_room.n.01', 'name': 'shower_room'}, {'id': 10866, 'synset': 'shower_stall.n.01', 'name': 'shower_stall'}, {'id': 10867, 'synset': 'showroom.n.01', 'name': 'showroom'}, {'id': 10868, 'synset': 'shrapnel.n.01', 'name': 'shrapnel'}, {'id': 10869, 'synset': 'shrimper.n.01', 'name': 'shrimper'}, {'id': 10870, 'synset': 'shrine.n.01', 'name': 'shrine'}, {'id': 10871, 'synset': 'shrink-wrap.n.01', 'name': 'shrink-wrap'}, {'id': 10872, 'synset': 'shunt.n.03', 'name': 'shunt'}, {'id': 10873, 'synset': 'shunt.n.02', 'name': 'shunt'}, {'id': 10874, 'synset': 'shunter.n.01', 'name': 'shunter'}, {'id': 10875, 'synset': 'shutter.n.02', 'name': 'shutter'}, {'id': 10876, 'synset': 'shutter.n.01', 'name': 'shutter'}, {'id': 10877, 'synset': 'shuttle.n.03', 'name': 'shuttle'}, {'id': 10878, 'synset': 'shuttle.n.02', 'name': 'shuttle'}, {'id': 10879, 'synset': 'shuttle_bus.n.01', 'name': 'shuttle_bus'}, {'id': 10880, 'synset': 'shuttlecock.n.01', 'name': 'shuttlecock'}, {'id': 10881, 'synset': 'shuttle_helicopter.n.01', 'name': 'shuttle_helicopter'}, {'id': 10882, 'synset': 'sibley_tent.n.01', 'name': 'Sibley_tent'}, {'id': 10883, 
'synset': 'sickbay.n.01', 'name': 'sickbay'}, {'id': 10884, 'synset': 'sickbed.n.01', 'name': 'sickbed'}, {'id': 10885, 'synset': 'sickle.n.01', 'name': 'sickle'}, {'id': 10886, 'synset': 'sickroom.n.01', 'name': 'sickroom'}, {'id': 10887, 'synset': 'sideboard.n.02', 'name': 'sideboard'}, {'id': 10888, 'synset': 'sidecar.n.02', 'name': 'sidecar'}, {'id': 10889, 'synset': 'side_chapel.n.01', 'name': 'side_chapel'}, {'id': 10890, 'synset': 'sidelight.n.01', 'name': 'sidelight'}, {'id': 10891, 'synset': 'sidesaddle.n.01', 'name': 'sidesaddle'}, {'id': 10892, 'synset': 'sidewalk.n.01', 'name': 'sidewalk'}, {'id': 10893, 'synset': 'sidewall.n.02', 'name': 'sidewall'}, {'id': 10894, 'synset': 'side-wheeler.n.01', 'name': 'side-wheeler'}, {'id': 10895, 'synset': 'sidewinder.n.02', 'name': 'sidewinder'}, {'id': 10896, 'synset': 'sieve.n.01', 'name': 'sieve'}, {'id': 10897, 'synset': 'sifter.n.01', 'name': 'sifter'}, {'id': 10898, 'synset': 'sights.n.01', 'name': 'sights'}, {'id': 10899, 'synset': 'sigmoidoscope.n.01', 'name': 'sigmoidoscope'}, {'id': 10900, 'synset': 'signal_box.n.01', 'name': 'signal_box'}, {'id': 10901, 'synset': 'signaling_device.n.01', 'name': 'signaling_device'}, {'id': 10902, 'synset': 'silencer.n.02', 'name': 'silencer'}, {'id': 10903, 'synset': 'silent_butler.n.01', 'name': 'silent_butler'}, {'id': 10904, 'synset': 'silex.n.02', 'name': 'Silex'}, {'id': 10905, 'synset': 'silk.n.01', 'name': 'silk'}, {'id': 10906, 'synset': 'silks.n.01', 'name': 'silks'}, {'id': 10907, 'synset': 'silver_plate.n.02', 'name': 'silver_plate'}, {'id': 10908, 'synset': 'silverpoint.n.01', 'name': 'silverpoint'}, {'id': 10909, 'synset': 'simple_pendulum.n.01', 'name': 'simple_pendulum'}, {'id': 10910, 'synset': 'simulator.n.01', 'name': 'simulator'}, {'id': 10911, 'synset': 'single_bed.n.01', 'name': 'single_bed'}, {'id': 10912, 'synset': 'single-breasted_jacket.n.01', 'name': 'single-breasted_jacket'}, {'id': 10913, 'synset': 'single-breasted_suit.n.01', 'name': 'single-breasted_suit'}, {'id': 10914, 'synset': 'single_prop.n.01', 'name': 'single_prop'}, {'id': 10915, 'synset': 'single-reed_instrument.n.01', 'name': 'single-reed_instrument'}, {'id': 10916, 'synset': 'single-rotor_helicopter.n.01', 'name': 'single-rotor_helicopter'}, {'id': 10917, 'synset': 'singlestick.n.01', 'name': 'singlestick'}, {'id': 10918, 'synset': 'singlet.n.01', 'name': 'singlet'}, {'id': 10919, 'synset': 'siren.n.04', 'name': 'siren'}, {'id': 10920, 'synset': 'sister_ship.n.01', 'name': 'sister_ship'}, {'id': 10921, 'synset': 'sitar.n.01', 'name': 'sitar'}, {'id': 10922, 'synset': 'sitz_bath.n.01', 'name': 'sitz_bath'}, {'id': 10923, 'synset': 'six-pack.n.01', 'name': 'six-pack'}, {'id': 10924, 'synset': 'skate.n.01', 'name': 'skate'}, {'id': 10925, 'synset': 'skeg.n.01', 'name': 'skeg'}, {'id': 10926, 'synset': 'skein.n.01', 'name': 'skein'}, {'id': 10927, 'synset': 'skeleton.n.04', 'name': 'skeleton'}, {'id': 10928, 'synset': 'skeleton_key.n.01', 'name': 'skeleton_key'}, {'id': 10929, 'synset': 'skep.n.02', 'name': 'skep'}, {'id': 10930, 'synset': 'skep.n.01', 'name': 'skep'}, {'id': 10931, 'synset': 'sketch.n.01', 'name': 'sketch'}, {'id': 10932, 'synset': 'sketcher.n.02', 'name': 'sketcher'}, {'id': 10933, 'synset': 'skew_arch.n.01', 'name': 'skew_arch'}, {'id': 10934, 'synset': 'ski_binding.n.01', 'name': 'ski_binding'}, {'id': 10935, 'synset': 'skibob.n.01', 'name': 'skibob'}, {'id': 10936, 'synset': 'ski_cap.n.01', 'name': 'ski_cap'}, {'id': 10937, 'synset': 'skidder.n.03', 'name': 'skidder'}, {'id': 10938, 
'synset': 'skid_lid.n.01', 'name': 'skid_lid'}, {'id': 10939, 'synset': 'skiff.n.01', 'name': 'skiff'}, {'id': 10940, 'synset': 'ski_jump.n.01', 'name': 'ski_jump'}, {'id': 10941, 'synset': 'ski_lodge.n.01', 'name': 'ski_lodge'}, {'id': 10942, 'synset': 'ski_mask.n.01', 'name': 'ski_mask'}, {'id': 10943, 'synset': 'skimmer.n.02', 'name': 'skimmer'}, {'id': 10944, 'synset': 'ski-plane.n.01', 'name': 'ski-plane'}, {'id': 10945, 'synset': 'ski_rack.n.01', 'name': 'ski_rack'}, {'id': 10946, 'synset': 'skirt.n.01', 'name': 'skirt'}, {'id': 10947, 'synset': 'ski_tow.n.01', 'name': 'ski_tow'}, {'id': 10948, 'synset': 'skivvies.n.01', 'name': 'Skivvies'}, {'id': 10949, 'synset': 'skybox.n.01', 'name': 'skybox'}, {'id': 10950, 'synset': 'skyhook.n.02', 'name': 'skyhook'}, {'id': 10951, 'synset': 'skylight.n.01', 'name': 'skylight'}, {'id': 10952, 'synset': 'skysail.n.01', 'name': 'skysail'}, {'id': 10953, 'synset': 'skyscraper.n.01', 'name': 'skyscraper'}, {'id': 10954, 'synset': 'skywalk.n.01', 'name': 'skywalk'}, {'id': 10955, 'synset': 'slacks.n.01', 'name': 'slacks'}, {'id': 10956, 'synset': 'slack_suit.n.01', 'name': 'slack_suit'}, {'id': 10957, 'synset': 'slasher.n.02', 'name': 'slasher'}, {'id': 10958, 'synset': 'slash_pocket.n.01', 'name': 'slash_pocket'}, {'id': 10959, 'synset': 'slat.n.01', 'name': 'slat'}, {'id': 10960, 'synset': 'slate.n.01', 'name': 'slate'}, {'id': 10961, 'synset': 'slate_pencil.n.01', 'name': 'slate_pencil'}, {'id': 10962, 'synset': 'slate_roof.n.01', 'name': 'slate_roof'}, {'id': 10963, 'synset': 'sleeper.n.07', 'name': 'sleeper'}, {'id': 10964, 'synset': 'sleeper.n.06', 'name': 'sleeper'}, {'id': 10965, 'synset': 'sleeping_car.n.01', 'name': 'sleeping_car'}, {'id': 10966, 'synset': 'sleeve.n.01', 'name': 'sleeve'}, {'id': 10967, 'synset': 'sleeve.n.02', 'name': 'sleeve'}, {'id': 10968, 'synset': 'sleigh_bed.n.01', 'name': 'sleigh_bed'}, {'id': 10969, 'synset': 'sleigh_bell.n.01', 'name': 'sleigh_bell'}, {'id': 10970, 'synset': 'slice_bar.n.01', 'name': 'slice_bar'}, {'id': 10971, 'synset': 'slicer.n.03', 'name': 'slicer'}, {'id': 10972, 'synset': 'slicer.n.02', 'name': 'slicer'}, {'id': 10973, 'synset': 'slide.n.04', 'name': 'slide'}, {'id': 10974, 'synset': 'slide_fastener.n.01', 'name': 'slide_fastener'}, {'id': 10975, 'synset': 'slide_projector.n.01', 'name': 'slide_projector'}, {'id': 10976, 'synset': 'slide_rule.n.01', 'name': 'slide_rule'}, {'id': 10977, 'synset': 'slide_valve.n.01', 'name': 'slide_valve'}, {'id': 10978, 'synset': 'sliding_door.n.01', 'name': 'sliding_door'}, {'id': 10979, 'synset': 'sliding_seat.n.01', 'name': 'sliding_seat'}, {'id': 10980, 'synset': 'sliding_window.n.01', 'name': 'sliding_window'}, {'id': 10981, 'synset': 'sling.n.04', 'name': 'sling'}, {'id': 10982, 'synset': 'slingback.n.01', 'name': 'slingback'}, {'id': 10983, 'synset': 'slinger_ring.n.01', 'name': 'slinger_ring'}, {'id': 10984, 'synset': 'slip_clutch.n.01', 'name': 'slip_clutch'}, {'id': 10985, 'synset': 'slipcover.n.01', 'name': 'slipcover'}, {'id': 10986, 'synset': 'slip-joint_pliers.n.01', 'name': 'slip-joint_pliers'}, {'id': 10987, 'synset': 'slipknot.n.01', 'name': 'slipknot'}, {'id': 10988, 'synset': 'slip-on.n.01', 'name': 'slip-on'}, {'id': 10989, 'synset': 'slip_ring.n.01', 'name': 'slip_ring'}, {'id': 10990, 'synset': 'slit_lamp.n.01', 'name': 'slit_lamp'}, {'id': 10991, 'synset': 'slit_trench.n.01', 'name': 'slit_trench'}, {'id': 10992, 'synset': 'sloop.n.01', 'name': 'sloop'}, {'id': 10993, 'synset': 'sloop_of_war.n.01', 'name': 'sloop_of_war'}, {'id': 
10994, 'synset': 'slop_basin.n.01', 'name': 'slop_basin'}, {'id': 10995, 'synset': 'slop_pail.n.01', 'name': 'slop_pail'}, {'id': 10996, 'synset': 'slops.n.02', 'name': 'slops'}, {'id': 10997, 'synset': 'slopshop.n.01', 'name': 'slopshop'}, {'id': 10998, 'synset': 'slot.n.07', 'name': 'slot'}, {'id': 10999, 'synset': 'slot_machine.n.01', 'name': 'slot_machine'}, {'id': 11000, 'synset': 'sluice.n.01', 'name': 'sluice'}, {'id': 11001, 'synset': 'smack.n.03', 'name': 'smack'}, {'id': 11002, 'synset': 'small_boat.n.01', 'name': 'small_boat'}, {'id': 11003, 'synset': 'small_computer_system_interface.n.01', 'name': 'small_computer_system_interface'}, {'id': 11004, 'synset': 'small_ship.n.01', 'name': 'small_ship'}, {'id': 11005, 'synset': 'small_stores.n.01', 'name': 'small_stores'}, {'id': 11006, 'synset': 'smart_bomb.n.01', 'name': 'smart_bomb'}, {'id': 11007, 'synset': 'smelling_bottle.n.01', 'name': 'smelling_bottle'}, {'id': 11008, 'synset': 'smocking.n.01', 'name': 'smocking'}, {'id': 11009, 'synset': 'smoke_bomb.n.01', 'name': 'smoke_bomb'}, {'id': 11010, 'synset': 'smokehouse.n.01', 'name': 'smokehouse'}, {'id': 11011, 'synset': 'smoker.n.03', 'name': 'smoker'}, {'id': 11012, 'synset': 'smoke_screen.n.01', 'name': 'smoke_screen'}, {'id': 11013, 'synset': 'smoking_room.n.01', 'name': 'smoking_room'}, {'id': 11014, 'synset': 'smoothbore.n.01', 'name': 'smoothbore'}, {'id': 11015, 'synset': 'smooth_plane.n.01', 'name': 'smooth_plane'}, {'id': 11016, 'synset': 'snack_bar.n.01', 'name': 'snack_bar'}, {'id': 11017, 'synset': 'snaffle.n.01', 'name': 'snaffle'}, {'id': 11018, 'synset': 'snap.n.10', 'name': 'snap'}, {'id': 11019, 'synset': 'snap_brim.n.01', 'name': 'snap_brim'}, {'id': 11020, 'synset': 'snap-brim_hat.n.01', 'name': 'snap-brim_hat'}, {'id': 11021, 'synset': 'snare.n.05', 'name': 'snare'}, {'id': 11022, 'synset': 'snare_drum.n.01', 'name': 'snare_drum'}, {'id': 11023, 'synset': 'snatch_block.n.01', 'name': 'snatch_block'}, {'id': 11024, 'synset': 'snifter.n.01', 'name': 'snifter'}, {'id': 11025, 'synset': 'sniper_rifle.n.01', 'name': 'sniper_rifle'}, {'id': 11026, 'synset': 'snips.n.01', 'name': 'snips'}, {'id': 11027, 'synset': 'sno-cat.n.01', 'name': 'Sno-cat'}, {'id': 11028, 'synset': 'snood.n.01', 'name': 'snood'}, {'id': 11029, 'synset': 'snorkel.n.02', 'name': 'snorkel'}, {'id': 11030, 'synset': 'snorkel.n.01', 'name': 'snorkel'}, {'id': 11031, 'synset': 'snowbank.n.01', 'name': 'snowbank'}, {'id': 11032, 'synset': 'snowplow.n.01', 'name': 'snowplow'}, {'id': 11033, 'synset': 'snowshoe.n.01', 'name': 'snowshoe'}, {'id': 11034, 'synset': 'snowsuit.n.01', 'name': 'snowsuit'}, {'id': 11035, 'synset': 'snow_thrower.n.01', 'name': 'snow_thrower'}, {'id': 11036, 'synset': 'snuffbox.n.01', 'name': 'snuffbox'}, {'id': 11037, 'synset': 'snuffer.n.01', 'name': 'snuffer'}, {'id': 11038, 'synset': 'snuffers.n.01', 'name': 'snuffers'}, {'id': 11039, 'synset': 'soapbox.n.01', 'name': 'soapbox'}, {'id': 11040, 'synset': 'soap_dish.n.01', 'name': 'soap_dish'}, {'id': 11041, 'synset': 'soap_dispenser.n.01', 'name': 'soap_dispenser'}, {'id': 11042, 'synset': 'soap_pad.n.01', 'name': 'soap_pad'}, {'id': 11043, 'synset': 'socket.n.02', 'name': 'socket'}, {'id': 11044, 'synset': 'socket_wrench.n.01', 'name': 'socket_wrench'}, {'id': 11045, 'synset': 'socle.n.01', 'name': 'socle'}, {'id': 11046, 'synset': 'soda_can.n.01', 'name': 'soda_can'}, {'id': 11047, 'synset': 'soda_fountain.n.02', 'name': 'soda_fountain'}, {'id': 11048, 'synset': 'soda_fountain.n.01', 'name': 'soda_fountain'}, {'id': 11049, 
'synset': 'sod_house.n.01', 'name': 'sod_house'}, {'id': 11050, 'synset': 'sodium-vapor_lamp.n.01', 'name': 'sodium-vapor_lamp'}, {'id': 11051, 'synset': 'soffit.n.01', 'name': 'soffit'}, {'id': 11052, 'synset': 'soft_pedal.n.01', 'name': 'soft_pedal'}, {'id': 11053, 'synset': 'soil_pipe.n.01', 'name': 'soil_pipe'}, {'id': 11054, 'synset': 'solar_cell.n.01', 'name': 'solar_cell'}, {'id': 11055, 'synset': 'solar_dish.n.01', 'name': 'solar_dish'}, {'id': 11056, 'synset': 'solar_heater.n.01', 'name': 'solar_heater'}, {'id': 11057, 'synset': 'solar_house.n.01', 'name': 'solar_house'}, {'id': 11058, 'synset': 'solar_telescope.n.01', 'name': 'solar_telescope'}, {'id': 11059, 'synset': 'solar_thermal_system.n.01', 'name': 'solar_thermal_system'}, {'id': 11060, 'synset': 'soldering_iron.n.01', 'name': 'soldering_iron'}, {'id': 11061, 'synset': 'solenoid.n.01', 'name': 'solenoid'}, {'id': 11062, 'synset': 'solleret.n.01', 'name': 'solleret'}, {'id': 11063, 'synset': 'sonic_depth_finder.n.01', 'name': 'sonic_depth_finder'}, {'id': 11064, 'synset': 'sonogram.n.01', 'name': 'sonogram'}, {'id': 11065, 'synset': 'sonograph.n.01', 'name': 'sonograph'}, {'id': 11066, 'synset': 'sorter.n.02', 'name': 'sorter'}, {'id': 11067, 'synset': 'souk.n.01', 'name': 'souk'}, {'id': 11068, 'synset': 'sound_bow.n.01', 'name': 'sound_bow'}, {'id': 11069, 'synset': 'soundbox.n.01', 'name': 'soundbox'}, {'id': 11070, 'synset': 'sound_camera.n.01', 'name': 'sound_camera'}, {'id': 11071, 'synset': 'sounder.n.01', 'name': 'sounder'}, {'id': 11072, 'synset': 'sound_film.n.01', 'name': 'sound_film'}, {'id': 11073, 'synset': 'sounding_board.n.02', 'name': 'sounding_board'}, {'id': 11074, 'synset': 'sounding_rocket.n.01', 'name': 'sounding_rocket'}, {'id': 11075, 'synset': 'sound_recording.n.01', 'name': 'sound_recording'}, {'id': 11076, 'synset': 'sound_spectrograph.n.01', 'name': 'sound_spectrograph'}, {'id': 11077, 'synset': 'soup_ladle.n.01', 'name': 'soup_ladle'}, {'id': 11078, 'synset': 'source_of_illumination.n.01', 'name': 'source_of_illumination'}, {'id': 11079, 'synset': 'sourdine.n.02', 'name': 'sourdine'}, {'id': 11080, 'synset': 'soutache.n.01', 'name': 'soutache'}, {'id': 11081, 'synset': 'soutane.n.01', 'name': 'soutane'}, {'id': 11082, 'synset': "sou'wester.n.02", 'name': "sou'wester"}, {'id': 11083, 'synset': 'soybean_future.n.01', 'name': 'soybean_future'}, {'id': 11084, 'synset': 'space_bar.n.01', 'name': 'space_bar'}, {'id': 11085, 'synset': 'space_capsule.n.01', 'name': 'space_capsule'}, {'id': 11086, 'synset': 'spacecraft.n.01', 'name': 'spacecraft'}, {'id': 11087, 'synset': 'space_heater.n.01', 'name': 'space_heater'}, {'id': 11088, 'synset': 'space_helmet.n.01', 'name': 'space_helmet'}, {'id': 11089, 'synset': 'space_rocket.n.01', 'name': 'space_rocket'}, {'id': 11090, 'synset': 'space_station.n.01', 'name': 'space_station'}, {'id': 11091, 'synset': 'spacesuit.n.01', 'name': 'spacesuit'}, {'id': 11092, 'synset': 'spade.n.02', 'name': 'spade'}, {'id': 11093, 'synset': 'spade_bit.n.01', 'name': 'spade_bit'}, {'id': 11094, 'synset': 'spaghetti_junction.n.01', 'name': 'spaghetti_junction'}, {'id': 11095, 'synset': 'spandau.n.01', 'name': 'Spandau'}, {'id': 11096, 'synset': 'spandex.n.01', 'name': 'spandex'}, {'id': 11097, 'synset': 'spandrel.n.01', 'name': 'spandrel'}, {'id': 11098, 'synset': 'spanker.n.02', 'name': 'spanker'}, {'id': 11099, 'synset': 'spar.n.02', 'name': 'spar'}, {'id': 11100, 'synset': 'sparge_pipe.n.01', 'name': 'sparge_pipe'}, {'id': 11101, 'synset': 'spark_arrester.n.02', 'name': 
'spark_arrester'}, {'id': 11102, 'synset': 'spark_arrester.n.01', 'name': 'spark_arrester'}, {'id': 11103, 'synset': 'spark_chamber.n.01', 'name': 'spark_chamber'}, {'id': 11104, 'synset': 'spark_coil.n.01', 'name': 'spark_coil'}, {'id': 11105, 'synset': 'spark_gap.n.01', 'name': 'spark_gap'}, {'id': 11106, 'synset': 'spark_lever.n.01', 'name': 'spark_lever'}, {'id': 11107, 'synset': 'spark_plug.n.01', 'name': 'spark_plug'}, {'id': 11108, 'synset': 'sparkplug_wrench.n.01', 'name': 'sparkplug_wrench'}, {'id': 11109, 'synset': 'spark_transmitter.n.01', 'name': 'spark_transmitter'}, {'id': 11110, 'synset': 'spat.n.02', 'name': 'spat'}, {'id': 11111, 'synset': 'spatula.n.01', 'name': 'spatula'}, {'id': 11112, 'synset': 'speakerphone.n.01', 'name': 'speakerphone'}, {'id': 11113, 'synset': 'speaking_trumpet.n.01', 'name': 'speaking_trumpet'}, {'id': 11114, 'synset': 'spear.n.02', 'name': 'spear'}, {'id': 11115, 'synset': 'specialty_store.n.01', 'name': 'specialty_store'}, {'id': 11116, 'synset': 'specimen_bottle.n.01', 'name': 'specimen_bottle'}, {'id': 11117, 'synset': 'spectacle.n.02', 'name': 'spectacle'}, {'id': 11118, 'synset': 'spectator_pump.n.01', 'name': 'spectator_pump'}, {'id': 11119, 'synset': 'spectrograph.n.01', 'name': 'spectrograph'}, {'id': 11120, 'synset': 'spectrophotometer.n.01', 'name': 'spectrophotometer'}, {'id': 11121, 'synset': 'spectroscope.n.01', 'name': 'spectroscope'}, {'id': 11122, 'synset': 'speculum.n.02', 'name': 'speculum'}, {'id': 11123, 'synset': 'speedboat.n.01', 'name': 'speedboat'}, {'id': 11124, 'synset': 'speed_bump.n.01', 'name': 'speed_bump'}, {'id': 11125, 'synset': 'speedometer.n.01', 'name': 'speedometer'}, {'id': 11126, 'synset': 'speed_skate.n.01', 'name': 'speed_skate'}, {'id': 11127, 'synset': 'spherometer.n.01', 'name': 'spherometer'}, {'id': 11128, 'synset': 'sphygmomanometer.n.01', 'name': 'sphygmomanometer'}, {'id': 11129, 'synset': 'spicemill.n.01', 'name': 'spicemill'}, {'id': 11130, 'synset': 'spider.n.03', 'name': 'spider'}, {'id': 11131, 'synset': 'spider_web.n.01', 'name': 'spider_web'}, {'id': 11132, 'synset': 'spike.n.02', 'name': 'spike'}, {'id': 11133, 'synset': 'spike.n.11', 'name': 'spike'}, {'id': 11134, 'synset': 'spindle.n.04', 'name': 'spindle'}, {'id': 11135, 'synset': 'spindle.n.03', 'name': 'spindle'}, {'id': 11136, 'synset': 'spindle.n.02', 'name': 'spindle'}, {'id': 11137, 'synset': 'spin_dryer.n.01', 'name': 'spin_dryer'}, {'id': 11138, 'synset': 'spinet.n.02', 'name': 'spinet'}, {'id': 11139, 'synset': 'spinet.n.01', 'name': 'spinet'}, {'id': 11140, 'synset': 'spinnaker.n.01', 'name': 'spinnaker'}, {'id': 11141, 'synset': 'spinner.n.03', 'name': 'spinner'}, {'id': 11142, 'synset': 'spinning_frame.n.01', 'name': 'spinning_frame'}, {'id': 11143, 'synset': 'spinning_jenny.n.01', 'name': 'spinning_jenny'}, {'id': 11144, 'synset': 'spinning_machine.n.01', 'name': 'spinning_machine'}, {'id': 11145, 'synset': 'spinning_rod.n.01', 'name': 'spinning_rod'}, {'id': 11146, 'synset': 'spinning_wheel.n.01', 'name': 'spinning_wheel'}, {'id': 11147, 'synset': 'spiral_bandage.n.01', 'name': 'spiral_bandage'}, {'id': 11148, 'synset': 'spiral_ratchet_screwdriver.n.01', 'name': 'spiral_ratchet_screwdriver'}, {'id': 11149, 'synset': 'spiral_spring.n.01', 'name': 'spiral_spring'}, {'id': 11150, 'synset': 'spirit_lamp.n.01', 'name': 'spirit_lamp'}, {'id': 11151, 'synset': 'spirit_stove.n.01', 'name': 'spirit_stove'}, {'id': 11152, 'synset': 'spirometer.n.01', 'name': 'spirometer'}, {'id': 11153, 'synset': 'spit.n.03', 'name': 'spit'}, {'id': 
11154, 'synset': 'spittoon.n.01', 'name': 'spittoon'}, {'id': 11155, 'synset': 'splashboard.n.02', 'name': 'splashboard'}, {'id': 11156, 'synset': 'splasher.n.01', 'name': 'splasher'}, {'id': 11157, 'synset': 'splice.n.01', 'name': 'splice'}, {'id': 11158, 'synset': 'splicer.n.03', 'name': 'splicer'}, {'id': 11159, 'synset': 'splint.n.02', 'name': 'splint'}, {'id': 11160, 'synset': 'split_rail.n.01', 'name': 'split_rail'}, {'id': 11161, 'synset': 'spode.n.02', 'name': 'Spode'}, {'id': 11162, 'synset': 'spoiler.n.05', 'name': 'spoiler'}, {'id': 11163, 'synset': 'spoiler.n.04', 'name': 'spoiler'}, {'id': 11164, 'synset': 'spoke.n.01', 'name': 'spoke'}, {'id': 11165, 'synset': 'spokeshave.n.01', 'name': 'spokeshave'}, {'id': 11166, 'synset': 'sponge_cloth.n.01', 'name': 'sponge_cloth'}, {'id': 11167, 'synset': 'sponge_mop.n.01', 'name': 'sponge_mop'}, {'id': 11168, 'synset': 'spoon.n.03', 'name': 'spoon'}, {'id': 11169, 'synset': 'spork.n.01', 'name': 'Spork'}, {'id': 11170, 'synset': 'sporran.n.01', 'name': 'sporran'}, {'id': 11171, 'synset': 'sport_kite.n.01', 'name': 'sport_kite'}, {'id': 11172, 'synset': 'sports_car.n.01', 'name': 'sports_car'}, {'id': 11173, 'synset': 'sports_equipment.n.01', 'name': 'sports_equipment'}, {'id': 11174, 'synset': 'sports_implement.n.01', 'name': 'sports_implement'}, {'id': 11175, 'synset': 'sport_utility.n.01', 'name': 'sport_utility'}, {'id': 11176, 'synset': 'spot.n.07', 'name': 'spot'}, {'id': 11177, 'synset': 'spot_weld.n.01', 'name': 'spot_weld'}, {'id': 11178, 'synset': 'spouter.n.02', 'name': 'spouter'}, {'id': 11179, 'synset': 'sprag.n.01', 'name': 'sprag'}, {'id': 11180, 'synset': 'spray_gun.n.01', 'name': 'spray_gun'}, {'id': 11181, 'synset': 'spray_paint.n.01', 'name': 'spray_paint'}, {'id': 11182, 'synset': 'spreader.n.01', 'name': 'spreader'}, {'id': 11183, 'synset': 'sprig.n.02', 'name': 'sprig'}, {'id': 11184, 'synset': 'spring.n.02', 'name': 'spring'}, {'id': 11185, 'synset': 'spring_balance.n.01', 'name': 'spring_balance'}, {'id': 11186, 'synset': 'springboard.n.01', 'name': 'springboard'}, {'id': 11187, 'synset': 'sprinkler.n.01', 'name': 'sprinkler'}, {'id': 11188, 'synset': 'sprinkler_system.n.01', 'name': 'sprinkler_system'}, {'id': 11189, 'synset': 'sprit.n.01', 'name': 'sprit'}, {'id': 11190, 'synset': 'spritsail.n.01', 'name': 'spritsail'}, {'id': 11191, 'synset': 'sprocket.n.02', 'name': 'sprocket'}, {'id': 11192, 'synset': 'sprocket.n.01', 'name': 'sprocket'}, {'id': 11193, 'synset': 'spun_yarn.n.01', 'name': 'spun_yarn'}, {'id': 11194, 'synset': 'spur.n.04', 'name': 'spur'}, {'id': 11195, 'synset': 'spur_gear.n.01', 'name': 'spur_gear'}, {'id': 11196, 'synset': 'sputnik.n.01', 'name': 'sputnik'}, {'id': 11197, 'synset': 'spy_satellite.n.01', 'name': 'spy_satellite'}, {'id': 11198, 'synset': 'squad_room.n.01', 'name': 'squad_room'}, {'id': 11199, 'synset': 'square.n.08', 'name': 'square'}, {'id': 11200, 'synset': 'square_knot.n.01', 'name': 'square_knot'}, {'id': 11201, 'synset': 'square-rigger.n.01', 'name': 'square-rigger'}, {'id': 11202, 'synset': 'square_sail.n.01', 'name': 'square_sail'}, {'id': 11203, 'synset': 'squash_ball.n.01', 'name': 'squash_ball'}, {'id': 11204, 'synset': 'squash_racket.n.01', 'name': 'squash_racket'}, {'id': 11205, 'synset': 'squawk_box.n.01', 'name': 'squawk_box'}, {'id': 11206, 'synset': 'squeegee.n.01', 'name': 'squeegee'}, {'id': 11207, 'synset': 'squeezer.n.01', 'name': 'squeezer'}, {'id': 11208, 'synset': 'squelch_circuit.n.01', 'name': 'squelch_circuit'}, {'id': 11209, 'synset': 'squinch.n.01', 
'name': 'squinch'}, {'id': 11210, 'synset': 'stabilizer.n.03', 'name': 'stabilizer'}, {'id': 11211, 'synset': 'stabilizer.n.02', 'name': 'stabilizer'}, {'id': 11212, 'synset': 'stabilizer_bar.n.01', 'name': 'stabilizer_bar'}, {'id': 11213, 'synset': 'stable.n.01', 'name': 'stable'}, {'id': 11214, 'synset': 'stable_gear.n.01', 'name': 'stable_gear'}, {'id': 11215, 'synset': 'stabling.n.01', 'name': 'stabling'}, {'id': 11216, 'synset': 'stacks.n.02', 'name': 'stacks'}, {'id': 11217, 'synset': 'staddle.n.01', 'name': 'staddle'}, {'id': 11218, 'synset': 'stadium.n.01', 'name': 'stadium'}, {'id': 11219, 'synset': 'stage.n.03', 'name': 'stage'}, {'id': 11220, 'synset': 'stained-glass_window.n.01', 'name': 'stained-glass_window'}, {'id': 11221, 'synset': 'stair-carpet.n.01', 'name': 'stair-carpet'}, {'id': 11222, 'synset': 'stair-rod.n.01', 'name': 'stair-rod'}, {'id': 11223, 'synset': 'stairwell.n.01', 'name': 'stairwell'}, {'id': 11224, 'synset': 'stake.n.05', 'name': 'stake'}, {'id': 11225, 'synset': 'stall.n.03', 'name': 'stall'}, {'id': 11226, 'synset': 'stall.n.01', 'name': 'stall'}, {'id': 11227, 'synset': 'stamp.n.08', 'name': 'stamp'}, {'id': 11228, 'synset': 'stamp_mill.n.01', 'name': 'stamp_mill'}, {'id': 11229, 'synset': 'stamping_machine.n.01', 'name': 'stamping_machine'}, {'id': 11230, 'synset': 'stanchion.n.01', 'name': 'stanchion'}, {'id': 11231, 'synset': 'stand.n.04', 'name': 'stand'}, {'id': 11232, 'synset': 'standard.n.05', 'name': 'standard'}, {'id': 11233, 'synset': 'standard_cell.n.01', 'name': 'standard_cell'}, {'id': 11234, 'synset': 'standard_transmission.n.01', 'name': 'standard_transmission'}, {'id': 11235, 'synset': 'standing_press.n.01', 'name': 'standing_press'}, {'id': 11236, 'synset': 'stanhope.n.01', 'name': 'stanhope'}, {'id': 11237, 'synset': 'stanley_steamer.n.01', 'name': 'Stanley_Steamer'}, {'id': 11238, 'synset': 'staple.n.05', 'name': 'staple'}, {'id': 11239, 'synset': 'staple.n.04', 'name': 'staple'}, {'id': 11240, 'synset': 'staple_gun.n.01', 'name': 'staple_gun'}, {'id': 11241, 'synset': 'starship.n.01', 'name': 'starship'}, {'id': 11242, 'synset': 'starter.n.01', 'name': 'starter'}, {'id': 11243, 'synset': 'starting_gate.n.01', 'name': 'starting_gate'}, {'id': 11244, 'synset': 'stassano_furnace.n.01', 'name': 'Stassano_furnace'}, {'id': 11245, 'synset': 'statehouse.n.01', 'name': 'Statehouse'}, {'id': 11246, 'synset': 'stately_home.n.01', 'name': 'stately_home'}, {'id': 11247, 'synset': 'state_prison.n.01', 'name': 'state_prison'}, {'id': 11248, 'synset': 'stateroom.n.01', 'name': 'stateroom'}, {'id': 11249, 'synset': 'static_tube.n.01', 'name': 'static_tube'}, {'id': 11250, 'synset': 'station.n.01', 'name': 'station'}, {'id': 11251, 'synset': 'stator.n.01', 'name': 'stator'}, {'id': 11252, 'synset': 'stay.n.05', 'name': 'stay'}, {'id': 11253, 'synset': 'staysail.n.01', 'name': 'staysail'}, {'id': 11254, 'synset': 'steakhouse.n.01', 'name': 'steakhouse'}, {'id': 11255, 'synset': 'stealth_aircraft.n.01', 'name': 'stealth_aircraft'}, {'id': 11256, 'synset': 'stealth_bomber.n.01', 'name': 'stealth_bomber'}, {'id': 11257, 'synset': 'stealth_fighter.n.01', 'name': 'stealth_fighter'}, {'id': 11258, 'synset': 'steam_bath.n.01', 'name': 'steam_bath'}, {'id': 11259, 'synset': 'steamboat.n.01', 'name': 'steamboat'}, {'id': 11260, 'synset': 'steam_chest.n.01', 'name': 'steam_chest'}, {'id': 11261, 'synset': 'steam_engine.n.01', 'name': 'steam_engine'}, {'id': 11262, 'synset': 'steamer.n.03', 'name': 'steamer'}, {'id': 11263, 'synset': 'steamer.n.02', 'name': 
'steamer'}, {'id': 11264, 'synset': 'steam_iron.n.01', 'name': 'steam_iron'}, {'id': 11265, 'synset': 'steam_locomotive.n.01', 'name': 'steam_locomotive'}, {'id': 11266, 'synset': 'steamroller.n.02', 'name': 'steamroller'}, {'id': 11267, 'synset': 'steam_shovel.n.01', 'name': 'steam_shovel'}, {'id': 11268, 'synset': 'steam_turbine.n.01', 'name': 'steam_turbine'}, {'id': 11269, 'synset': 'steam_whistle.n.01', 'name': 'steam_whistle'}, {'id': 11270, 'synset': 'steel.n.03', 'name': 'steel'}, {'id': 11271, 'synset': 'steel_arch_bridge.n.01', 'name': 'steel_arch_bridge'}, {'id': 11272, 'synset': 'steel_drum.n.01', 'name': 'steel_drum'}, {'id': 11273, 'synset': 'steel_mill.n.01', 'name': 'steel_mill'}, {'id': 11274, 'synset': 'steel-wool_pad.n.01', 'name': 'steel-wool_pad'}, {'id': 11275, 'synset': 'steelyard.n.01', 'name': 'steelyard'}, {'id': 11276, 'synset': 'steeple.n.01', 'name': 'steeple'}, {'id': 11277, 'synset': 'steerage.n.01', 'name': 'steerage'}, {'id': 11278, 'synset': 'steering_gear.n.01', 'name': 'steering_gear'}, {'id': 11279, 'synset': 'steering_linkage.n.01', 'name': 'steering_linkage'}, {'id': 11280, 'synset': 'steering_system.n.01', 'name': 'steering_system'}, {'id': 11281, 'synset': 'stele.n.02', 'name': 'stele'}, {'id': 11282, 'synset': 'stem-winder.n.01', 'name': 'stem-winder'}, {'id': 11283, 'synset': 'stencil.n.01', 'name': 'stencil'}, {'id': 11284, 'synset': 'sten_gun.n.01', 'name': 'Sten_gun'}, {'id': 11285, 'synset': 'stenograph.n.02', 'name': 'stenograph'}, {'id': 11286, 'synset': 'step.n.04', 'name': 'step'}, {'id': 11287, 'synset': 'step-down_transformer.n.01', 'name': 'step-down_transformer'}, {'id': 11288, 'synset': 'step-up_transformer.n.01', 'name': 'step-up_transformer'}, {'id': 11289, 'synset': 'stereoscope.n.01', 'name': 'stereoscope'}, {'id': 11290, 'synset': 'stern_chaser.n.01', 'name': 'stern_chaser'}, {'id': 11291, 'synset': 'sternpost.n.01', 'name': 'sternpost'}, {'id': 11292, 'synset': 'sternwheeler.n.01', 'name': 'sternwheeler'}, {'id': 11293, 'synset': 'stethoscope.n.01', 'name': 'stethoscope'}, {'id': 11294, 'synset': 'stewing_pan.n.01', 'name': 'stewing_pan'}, {'id': 11295, 'synset': 'stick.n.01', 'name': 'stick'}, {'id': 11296, 'synset': 'stick.n.07', 'name': 'stick'}, {'id': 11297, 'synset': 'stick.n.03', 'name': 'stick'}, {'id': 11298, 'synset': 'stick.n.06', 'name': 'stick'}, {'id': 11299, 'synset': 'stile.n.01', 'name': 'stile'}, {'id': 11300, 'synset': 'stiletto.n.01', 'name': 'stiletto'}, {'id': 11301, 'synset': 'still.n.03', 'name': 'still'}, {'id': 11302, 'synset': 'stillroom.n.01', 'name': 'stillroom'}, {'id': 11303, 'synset': 'stillson_wrench.n.01', 'name': 'Stillson_wrench'}, {'id': 11304, 'synset': 'stilt.n.02', 'name': 'stilt'}, {'id': 11305, 'synset': 'stinger.n.03', 'name': 'Stinger'}, {'id': 11306, 'synset': 'stink_bomb.n.01', 'name': 'stink_bomb'}, {'id': 11307, 'synset': 'stirrup_pump.n.01', 'name': 'stirrup_pump'}, {'id': 11308, 'synset': 'stob.n.01', 'name': 'stob'}, {'id': 11309, 'synset': 'stock.n.03', 'name': 'stock'}, {'id': 11310, 'synset': 'stockade.n.01', 'name': 'stockade'}, {'id': 11311, 'synset': 'stockcar.n.01', 'name': 'stockcar'}, {'id': 11312, 'synset': 'stock_car.n.02', 'name': 'stock_car'}, {'id': 11313, 'synset': 'stockinet.n.01', 'name': 'stockinet'}, {'id': 11314, 'synset': 'stocking.n.01', 'name': 'stocking'}, {'id': 11315, 'synset': 'stock-in-trade.n.01', 'name': 'stock-in-trade'}, {'id': 11316, 'synset': 'stockpot.n.01', 'name': 'stockpot'}, {'id': 11317, 'synset': 'stockroom.n.01', 'name': 'stockroom'}, 
{'id': 11318, 'synset': 'stocks.n.03', 'name': 'stocks'}, {'id': 11319, 'synset': 'stock_saddle.n.01', 'name': 'stock_saddle'}, {'id': 11320, 'synset': 'stockyard.n.01', 'name': 'stockyard'}, {'id': 11321, 'synset': 'stole.n.01', 'name': 'stole'}, {'id': 11322, 'synset': 'stomacher.n.01', 'name': 'stomacher'}, {'id': 11323, 'synset': 'stomach_pump.n.01', 'name': 'stomach_pump'}, {'id': 11324, 'synset': 'stone_wall.n.01', 'name': 'stone_wall'}, {'id': 11325, 'synset': 'stoneware.n.01', 'name': 'stoneware'}, {'id': 11326, 'synset': 'stonework.n.01', 'name': 'stonework'}, {'id': 11327, 'synset': 'stoop.n.03', 'name': 'stoop'}, {'id': 11328, 'synset': 'stop_bath.n.01', 'name': 'stop_bath'}, {'id': 11329, 'synset': 'stopcock.n.01', 'name': 'stopcock'}, {'id': 11330, 'synset': 'stopper_knot.n.01', 'name': 'stopper_knot'}, {'id': 11331, 'synset': 'stopwatch.n.01', 'name': 'stopwatch'}, {'id': 11332, 'synset': 'storage_battery.n.01', 'name': 'storage_battery'}, {'id': 11333, 'synset': 'storage_cell.n.01', 'name': 'storage_cell'}, {'id': 11334, 'synset': 'storage_ring.n.01', 'name': 'storage_ring'}, {'id': 11335, 'synset': 'storage_space.n.01', 'name': 'storage_space'}, {'id': 11336, 'synset': 'storeroom.n.01', 'name': 'storeroom'}, {'id': 11337, 'synset': 'storm_cellar.n.01', 'name': 'storm_cellar'}, {'id': 11338, 'synset': 'storm_door.n.01', 'name': 'storm_door'}, {'id': 11339, 'synset': 'storm_window.n.01', 'name': 'storm_window'}, {'id': 11340, 'synset': 'stoup.n.02', 'name': 'stoup'}, {'id': 11341, 'synset': 'stoup.n.01', 'name': 'stoup'}, {'id': 11342, 'synset': 'stove.n.02', 'name': 'stove'}, {'id': 11343, 'synset': 'stove_bolt.n.01', 'name': 'stove_bolt'}, {'id': 11344, 'synset': 'stovepipe.n.01', 'name': 'stovepipe'}, {'id': 11345, 'synset': 'stovepipe_iron.n.01', 'name': 'stovepipe_iron'}, {'id': 11346, 'synset': 'stradavarius.n.01', 'name': 'Stradavarius'}, {'id': 11347, 'synset': 'straight_chair.n.01', 'name': 'straight_chair'}, {'id': 11348, 'synset': 'straightedge.n.01', 'name': 'straightedge'}, {'id': 11349, 'synset': 'straightener.n.01', 'name': 'straightener'}, {'id': 11350, 'synset': 'straight_flute.n.01', 'name': 'straight_flute'}, {'id': 11351, 'synset': 'straight_pin.n.01', 'name': 'straight_pin'}, {'id': 11352, 'synset': 'straight_razor.n.01', 'name': 'straight_razor'}, {'id': 11353, 'synset': 'straitjacket.n.02', 'name': 'straitjacket'}, {'id': 11354, 'synset': 'strap.n.04', 'name': 'strap'}, {'id': 11355, 'synset': 'strap_hinge.n.01', 'name': 'strap_hinge'}, {'id': 11356, 'synset': 'strapless.n.01', 'name': 'strapless'}, {'id': 11357, 'synset': 'streamer_fly.n.01', 'name': 'streamer_fly'}, {'id': 11358, 'synset': 'streamliner.n.01', 'name': 'streamliner'}, {'id': 11359, 'synset': 'street.n.01', 'name': 'street'}, {'id': 11360, 'synset': 'street.n.02', 'name': 'street'}, {'id': 11361, 'synset': 'streetcar.n.01', 'name': 'streetcar'}, {'id': 11362, 'synset': 'street_clothes.n.01', 'name': 'street_clothes'}, {'id': 11363, 'synset': 'stretcher.n.03', 'name': 'stretcher'}, {'id': 11364, 'synset': 'stretcher.n.01', 'name': 'stretcher'}, {'id': 11365, 'synset': 'stretch_pants.n.01', 'name': 'stretch_pants'}, {'id': 11366, 'synset': 'strickle.n.02', 'name': 'strickle'}, {'id': 11367, 'synset': 'strickle.n.01', 'name': 'strickle'}, {'id': 11368, 'synset': 'stringed_instrument.n.01', 'name': 'stringed_instrument'}, {'id': 11369, 'synset': 'stringer.n.04', 'name': 'stringer'}, {'id': 11370, 'synset': 'stringer.n.03', 'name': 'stringer'}, {'id': 11371, 'synset': 'string_tie.n.01', 
'name': 'string_tie'}, {'id': 11372, 'synset': 'strip.n.05', 'name': 'strip'}, {'id': 11373, 'synset': 'strip_lighting.n.01', 'name': 'strip_lighting'}, {'id': 11374, 'synset': 'strip_mall.n.01', 'name': 'strip_mall'}, {'id': 11375, 'synset': 'stroboscope.n.01', 'name': 'stroboscope'}, {'id': 11376, 'synset': 'strongbox.n.01', 'name': 'strongbox'}, {'id': 11377, 'synset': 'stronghold.n.01', 'name': 'stronghold'}, {'id': 11378, 'synset': 'strongroom.n.01', 'name': 'strongroom'}, {'id': 11379, 'synset': 'strop.n.01', 'name': 'strop'}, {'id': 11380, 'synset': 'structural_member.n.01', 'name': 'structural_member'}, {'id': 11381, 'synset': 'structure.n.01', 'name': 'structure'}, {'id': 11382, 'synset': 'student_center.n.01', 'name': 'student_center'}, {'id': 11383, 'synset': 'student_lamp.n.01', 'name': 'student_lamp'}, {'id': 11384, 'synset': 'student_union.n.01', 'name': 'student_union'}, {'id': 11385, 'synset': 'stud_finder.n.01', 'name': 'stud_finder'}, {'id': 11386, 'synset': 'studio_apartment.n.01', 'name': 'studio_apartment'}, {'id': 11387, 'synset': 'studio_couch.n.01', 'name': 'studio_couch'}, {'id': 11388, 'synset': 'study.n.05', 'name': 'study'}, {'id': 11389, 'synset': 'study_hall.n.02', 'name': 'study_hall'}, {'id': 11390, 'synset': 'stuffing_nut.n.01', 'name': 'stuffing_nut'}, {'id': 11391, 'synset': 'stump.n.03', 'name': 'stump'}, {'id': 11392, 'synset': 'stun_gun.n.01', 'name': 'stun_gun'}, {'id': 11393, 'synset': 'stupa.n.01', 'name': 'stupa'}, {'id': 11394, 'synset': 'sty.n.02', 'name': 'sty'}, {'id': 11395, 'synset': 'stylus.n.01', 'name': 'stylus'}, {'id': 11396, 'synset': 'sub-assembly.n.01', 'name': 'sub-assembly'}, {'id': 11397, 'synset': 'subcompact.n.01', 'name': 'subcompact'}, {'id': 11398, 'synset': 'submachine_gun.n.01', 'name': 'submachine_gun'}, {'id': 11399, 'synset': 'submarine.n.01', 'name': 'submarine'}, {'id': 11400, 'synset': 'submarine_torpedo.n.01', 'name': 'submarine_torpedo'}, {'id': 11401, 'synset': 'submersible.n.02', 'name': 'submersible'}, {'id': 11402, 'synset': 'submersible.n.01', 'name': 'submersible'}, {'id': 11403, 'synset': 'subtracter.n.02', 'name': 'subtracter'}, {'id': 11404, 'synset': 'subway_token.n.01', 'name': 'subway_token'}, {'id': 11405, 'synset': 'subway_train.n.01', 'name': 'subway_train'}, {'id': 11406, 'synset': 'suction_cup.n.01', 'name': 'suction_cup'}, {'id': 11407, 'synset': 'suction_pump.n.01', 'name': 'suction_pump'}, {'id': 11408, 'synset': 'sudatorium.n.01', 'name': 'sudatorium'}, {'id': 11409, 'synset': 'suede_cloth.n.01', 'name': 'suede_cloth'}, {'id': 11410, 'synset': 'sugar_refinery.n.01', 'name': 'sugar_refinery'}, {'id': 11411, 'synset': 'sugar_spoon.n.01', 'name': 'sugar_spoon'}, {'id': 11412, 'synset': 'suite.n.02', 'name': 'suite'}, {'id': 11413, 'synset': 'suiting.n.01', 'name': 'suiting'}, {'id': 11414, 'synset': 'sulky.n.01', 'name': 'sulky'}, {'id': 11415, 'synset': 'summer_house.n.01', 'name': 'summer_house'}, {'id': 11416, 'synset': 'sumo_ring.n.01', 'name': 'sumo_ring'}, {'id': 11417, 'synset': 'sump.n.01', 'name': 'sump'}, {'id': 11418, 'synset': 'sump_pump.n.01', 'name': 'sump_pump'}, {'id': 11419, 'synset': 'sunbonnet.n.01', 'name': 'sunbonnet'}, {'id': 11420, 'synset': 'sunday_best.n.01', 'name': 'Sunday_best'}, {'id': 11421, 'synset': 'sun_deck.n.01', 'name': 'sun_deck'}, {'id': 11422, 'synset': 'sundial.n.01', 'name': 'sundial'}, {'id': 11423, 'synset': 'sundress.n.01', 'name': 'sundress'}, {'id': 11424, 'synset': 'sundries.n.01', 'name': 'sundries'}, {'id': 11425, 'synset': 'sun_gear.n.01', 'name': 
'sun_gear'}, {'id': 11426, 'synset': 'sunglass.n.01', 'name': 'sunglass'}, {'id': 11427, 'synset': 'sunlamp.n.01', 'name': 'sunlamp'}, {'id': 11428, 'synset': 'sun_parlor.n.01', 'name': 'sun_parlor'}, {'id': 11429, 'synset': 'sunroof.n.01', 'name': 'sunroof'}, {'id': 11430, 'synset': 'sunscreen.n.01', 'name': 'sunscreen'}, {'id': 11431, 'synset': 'sunsuit.n.01', 'name': 'sunsuit'}, {'id': 11432, 'synset': 'supercharger.n.01', 'name': 'supercharger'}, {'id': 11433, 'synset': 'supercomputer.n.01', 'name': 'supercomputer'}, {'id': 11434, 'synset': 'superconducting_supercollider.n.01', 'name': 'superconducting_supercollider'}, {'id': 11435, 'synset': 'superhighway.n.02', 'name': 'superhighway'}, {'id': 11436, 'synset': 'supermarket.n.01', 'name': 'supermarket'}, {'id': 11437, 'synset': 'superstructure.n.01', 'name': 'superstructure'}, {'id': 11438, 'synset': 'supertanker.n.01', 'name': 'supertanker'}, {'id': 11439, 'synset': 'supper_club.n.01', 'name': 'supper_club'}, {'id': 11440, 'synset': 'supplejack.n.01', 'name': 'supplejack'}, {'id': 11441, 'synset': 'supply_chamber.n.01', 'name': 'supply_chamber'}, {'id': 11442, 'synset': 'supply_closet.n.01', 'name': 'supply_closet'}, {'id': 11443, 'synset': 'support.n.10', 'name': 'support'}, {'id': 11444, 'synset': 'support.n.07', 'name': 'support'}, {'id': 11445, 'synset': 'support_column.n.01', 'name': 'support_column'}, {'id': 11446, 'synset': 'support_hose.n.01', 'name': 'support_hose'}, {'id': 11447, 'synset': 'supporting_structure.n.01', 'name': 'supporting_structure'}, {'id': 11448, 'synset': 'supporting_tower.n.01', 'name': 'supporting_tower'}, {'id': 11449, 'synset': 'surcoat.n.02', 'name': 'surcoat'}, {'id': 11450, 'synset': 'surface_gauge.n.01', 'name': 'surface_gauge'}, {'id': 11451, 'synset': 'surface_lift.n.01', 'name': 'surface_lift'}, {'id': 11452, 'synset': 'surface_search_radar.n.01', 'name': 'surface_search_radar'}, {'id': 11453, 'synset': 'surface_ship.n.01', 'name': 'surface_ship'}, {'id': 11454, 'synset': 'surface-to-air_missile.n.01', 'name': 'surface-to-air_missile'}, {'id': 11455, 'synset': 'surface-to-air_missile_system.n.01', 'name': 'surface-to-air_missile_system'}, {'id': 11456, 'synset': 'surfboat.n.01', 'name': 'surfboat'}, {'id': 11457, 'synset': 'surcoat.n.01', 'name': 'surcoat'}, {'id': 11458, 'synset': "surgeon's_knot.n.01", 'name': "surgeon's_knot"}, {'id': 11459, 'synset': 'surgery.n.02', 'name': 'surgery'}, {'id': 11460, 'synset': 'surge_suppressor.n.01', 'name': 'surge_suppressor'}, {'id': 11461, 'synset': 'surgical_dressing.n.01', 'name': 'surgical_dressing'}, {'id': 11462, 'synset': 'surgical_instrument.n.01', 'name': 'surgical_instrument'}, {'id': 11463, 'synset': 'surgical_knife.n.01', 'name': 'surgical_knife'}, {'id': 11464, 'synset': 'surplice.n.01', 'name': 'surplice'}, {'id': 11465, 'synset': 'surrey.n.02', 'name': 'surrey'}, {'id': 11466, 'synset': 'surtout.n.01', 'name': 'surtout'}, {'id': 11467, 'synset': 'surveillance_system.n.01', 'name': 'surveillance_system'}, {'id': 11468, 'synset': 'surveying_instrument.n.01', 'name': 'surveying_instrument'}, {'id': 11469, 'synset': "surveyor's_level.n.01", 'name': "surveyor's_level"}, {'id': 11470, 'synset': 'sushi_bar.n.01', 'name': 'sushi_bar'}, {'id': 11471, 'synset': 'suspension.n.05', 'name': 'suspension'}, {'id': 11472, 'synset': 'suspension_bridge.n.01', 'name': 'suspension_bridge'}, {'id': 11473, 'synset': 'suspensory.n.01', 'name': 'suspensory'}, {'id': 11474, 'synset': 'sustaining_pedal.n.01', 'name': 'sustaining_pedal'}, {'id': 11475, 'synset': 
'suture.n.02', 'name': 'suture'}, {'id': 11476, 'synset': 'swab.n.01', 'name': 'swab'}, {'id': 11477, 'synset': 'swaddling_clothes.n.01', 'name': 'swaddling_clothes'}, {'id': 11478, 'synset': 'swag.n.03', 'name': 'swag'}, {'id': 11479, 'synset': 'swage_block.n.01', 'name': 'swage_block'}, {'id': 11480, 'synset': 'swagger_stick.n.01', 'name': 'swagger_stick'}, {'id': 11481, 'synset': 'swallow-tailed_coat.n.01', 'name': 'swallow-tailed_coat'}, {'id': 11482, 'synset': 'swamp_buggy.n.01', 'name': 'swamp_buggy'}, {'id': 11483, 'synset': "swan's_down.n.01", 'name': "swan's_down"}, {'id': 11484, 'synset': 'swathe.n.01', 'name': 'swathe'}, {'id': 11485, 'synset': 'swatter.n.01', 'name': 'swatter'}, {'id': 11486, 'synset': 'sweat_bag.n.01', 'name': 'sweat_bag'}, {'id': 11487, 'synset': 'sweatband.n.01', 'name': 'sweatband'}, {'id': 11488, 'synset': 'sweatshop.n.01', 'name': 'sweatshop'}, {'id': 11489, 'synset': 'sweat_suit.n.01', 'name': 'sweat_suit'}, {'id': 11490, 'synset': 'sweep.n.04', 'name': 'sweep'}, {'id': 11491, 'synset': 'sweep_hand.n.01', 'name': 'sweep_hand'}, {'id': 11492, 'synset': 'swimming_trunks.n.01', 'name': 'swimming_trunks'}, {'id': 11493, 'synset': 'swing.n.02', 'name': 'swing'}, {'id': 11494, 'synset': 'swing_door.n.01', 'name': 'swing_door'}, {'id': 11495, 'synset': 'switch.n.01', 'name': 'switch'}, {'id': 11496, 'synset': 'switchblade.n.01', 'name': 'switchblade'}, {'id': 11497, 'synset': 'switch_engine.n.01', 'name': 'switch_engine'}, {'id': 11498, 'synset': 'swivel.n.01', 'name': 'swivel'}, {'id': 11499, 'synset': 'swivel_chair.n.01', 'name': 'swivel_chair'}, {'id': 11500, 'synset': 'swizzle_stick.n.01', 'name': 'swizzle_stick'}, {'id': 11501, 'synset': 'sword_cane.n.01', 'name': 'sword_cane'}, {'id': 11502, 'synset': 's_wrench.n.01', 'name': 'S_wrench'}, {'id': 11503, 'synset': 'synagogue.n.01', 'name': 'synagogue'}, {'id': 11504, 'synset': 'synchrocyclotron.n.01', 'name': 'synchrocyclotron'}, {'id': 11505, 'synset': 'synchroflash.n.01', 'name': 'synchroflash'}, {'id': 11506, 'synset': 'synchromesh.n.01', 'name': 'synchromesh'}, {'id': 11507, 'synset': 'synchronous_converter.n.01', 'name': 'synchronous_converter'}, {'id': 11508, 'synset': 'synchronous_motor.n.01', 'name': 'synchronous_motor'}, {'id': 11509, 'synset': 'synchrotron.n.01', 'name': 'synchrotron'}, {'id': 11510, 'synset': 'synchroscope.n.01', 'name': 'synchroscope'}, {'id': 11511, 'synset': 'synthesizer.n.02', 'name': 'synthesizer'}, {'id': 11512, 'synset': 'system.n.01', 'name': 'system'}, {'id': 11513, 'synset': 'tabard.n.01', 'name': 'tabard'}, {'id': 11514, 'synset': 'tabernacle.n.02', 'name': 'Tabernacle'}, {'id': 11515, 'synset': 'tabi.n.01', 'name': 'tabi'}, {'id': 11516, 'synset': 'tab_key.n.01', 'name': 'tab_key'}, {'id': 11517, 'synset': 'table.n.03', 'name': 'table'}, {'id': 11518, 'synset': 'tablefork.n.01', 'name': 'tablefork'}, {'id': 11519, 'synset': 'table_knife.n.01', 'name': 'table_knife'}, {'id': 11520, 'synset': 'table_saw.n.01', 'name': 'table_saw'}, {'id': 11521, 'synset': 'tablespoon.n.02', 'name': 'tablespoon'}, {'id': 11522, 'synset': 'tablet-armed_chair.n.01', 'name': 'tablet-armed_chair'}, {'id': 11523, 'synset': 'table-tennis_racquet.n.01', 'name': 'table-tennis_racquet'}, {'id': 11524, 'synset': 'tabletop.n.01', 'name': 'tabletop'}, {'id': 11525, 'synset': 'tableware.n.01', 'name': 'tableware'}, {'id': 11526, 'synset': 'tabor.n.01', 'name': 'tabor'}, {'id': 11527, 'synset': 'taboret.n.01', 'name': 'taboret'}, {'id': 11528, 'synset': 'tachistoscope.n.01', 'name': 'tachistoscope'}, 
{'id': 11529, 'synset': 'tachograph.n.01', 'name': 'tachograph'}, {'id': 11530, 'synset': 'tachymeter.n.01', 'name': 'tachymeter'}, {'id': 11531, 'synset': 'tack.n.02', 'name': 'tack'}, {'id': 11532, 'synset': 'tack_hammer.n.01', 'name': 'tack_hammer'}, {'id': 11533, 'synset': 'taffeta.n.01', 'name': 'taffeta'}, {'id': 11534, 'synset': 'taffrail.n.01', 'name': 'taffrail'}, {'id': 11535, 'synset': 'tailgate.n.01', 'name': 'tailgate'}, {'id': 11536, 'synset': 'tailor-made.n.01', 'name': 'tailor-made'}, {'id': 11537, 'synset': "tailor's_chalk.n.01", 'name': "tailor's_chalk"}, {'id': 11538, 'synset': 'tailpipe.n.01', 'name': 'tailpipe'}, {'id': 11539, 'synset': 'tail_rotor.n.01', 'name': 'tail_rotor'}, {'id': 11540, 'synset': 'tailstock.n.01', 'name': 'tailstock'}, {'id': 11541, 'synset': 'take-up.n.01', 'name': 'take-up'}, {'id': 11542, 'synset': 'talaria.n.01', 'name': 'talaria'}, {'id': 11543, 'synset': 'talcum.n.02', 'name': 'talcum'}, {'id': 11544, 'synset': 'tam.n.01', 'name': 'tam'}, {'id': 11545, 'synset': 'tambour.n.02', 'name': 'tambour'}, {'id': 11546, 'synset': 'tambour.n.01', 'name': 'tambour'}, {'id': 11547, 'synset': 'tammy.n.01', 'name': 'tammy'}, {'id': 11548, 'synset': 'tamp.n.01', 'name': 'tamp'}, {'id': 11549, 'synset': 'tampax.n.01', 'name': 'Tampax'}, {'id': 11550, 'synset': 'tampion.n.01', 'name': 'tampion'}, {'id': 11551, 'synset': 'tampon.n.01', 'name': 'tampon'}, {'id': 11552, 'synset': 'tandoor.n.01', 'name': 'tandoor'}, {'id': 11553, 'synset': 'tangram.n.01', 'name': 'tangram'}, {'id': 11554, 'synset': 'tankard.n.01', 'name': 'tankard'}, {'id': 11555, 'synset': 'tank_car.n.01', 'name': 'tank_car'}, {'id': 11556, 'synset': 'tank_destroyer.n.01', 'name': 'tank_destroyer'}, {'id': 11557, 'synset': 'tank_engine.n.01', 'name': 'tank_engine'}, {'id': 11558, 'synset': 'tanker_plane.n.01', 'name': 'tanker_plane'}, {'id': 11559, 'synset': 'tank_shell.n.01', 'name': 'tank_shell'}, {'id': 11560, 'synset': 'tannoy.n.01', 'name': 'tannoy'}, {'id': 11561, 'synset': 'tap.n.06', 'name': 'tap'}, {'id': 11562, 'synset': 'tapa.n.02', 'name': 'tapa'}, {'id': 11563, 'synset': 'tape.n.02', 'name': 'tape'}, {'id': 11564, 'synset': 'tape_deck.n.01', 'name': 'tape_deck'}, {'id': 11565, 'synset': 'tape_drive.n.01', 'name': 'tape_drive'}, {'id': 11566, 'synset': 'tape_player.n.01', 'name': 'tape_player'}, {'id': 11567, 'synset': 'tape_recorder.n.01', 'name': 'tape_recorder'}, {'id': 11568, 'synset': 'taper_file.n.01', 'name': 'taper_file'}, {'id': 11569, 'synset': 'tappet.n.01', 'name': 'tappet'}, {'id': 11570, 'synset': 'tap_wrench.n.01', 'name': 'tap_wrench'}, {'id': 11571, 'synset': 'tare.n.05', 'name': 'tare'}, {'id': 11572, 'synset': 'target.n.04', 'name': 'target'}, {'id': 11573, 'synset': 'target_acquisition_system.n.01', 'name': 'target_acquisition_system'}, {'id': 11574, 'synset': 'tarmacadam.n.02', 'name': 'tarmacadam'}, {'id': 11575, 'synset': 'tasset.n.01', 'name': 'tasset'}, {'id': 11576, 'synset': 'tattoo.n.02', 'name': 'tattoo'}, {'id': 11577, 'synset': 'tavern.n.01', 'name': 'tavern'}, {'id': 11578, 'synset': 'tawse.n.01', 'name': 'tawse'}, {'id': 11579, 'synset': 'taximeter.n.01', 'name': 'taximeter'}, {'id': 11580, 'synset': 't-bar_lift.n.01', 'name': 'T-bar_lift'}, {'id': 11581, 'synset': 'tea_bag.n.02', 'name': 'tea_bag'}, {'id': 11582, 'synset': 'tea_ball.n.01', 'name': 'tea_ball'}, {'id': 11583, 'synset': 'tea_cart.n.01', 'name': 'tea_cart'}, {'id': 11584, 'synset': 'tea_chest.n.01', 'name': 'tea_chest'}, {'id': 11585, 'synset': 'teaching_aid.n.01', 'name': 
'teaching_aid'}, {'id': 11586, 'synset': 'tea_gown.n.01', 'name': 'tea_gown'}, {'id': 11587, 'synset': 'tea_maker.n.01', 'name': 'tea_maker'}, {'id': 11588, 'synset': 'teashop.n.01', 'name': 'teashop'}, {'id': 11589, 'synset': 'teaspoon.n.02', 'name': 'teaspoon'}, {'id': 11590, 'synset': 'tea-strainer.n.01', 'name': 'tea-strainer'}, {'id': 11591, 'synset': 'tea_table.n.01', 'name': 'tea_table'}, {'id': 11592, 'synset': 'tea_tray.n.01', 'name': 'tea_tray'}, {'id': 11593, 'synset': 'tea_urn.n.01', 'name': 'tea_urn'}, {'id': 11594, 'synset': 'tee.n.03', 'name': 'tee'}, {'id': 11595, 'synset': 'tee_hinge.n.01', 'name': 'tee_hinge'}, {'id': 11596, 'synset': 'telecom_hotel.n.01', 'name': 'telecom_hotel'}, {'id': 11597, 'synset': 'telecommunication_system.n.01', 'name': 'telecommunication_system'}, {'id': 11598, 'synset': 'telegraph.n.01', 'name': 'telegraph'}, {'id': 11599, 'synset': 'telegraph_key.n.01', 'name': 'telegraph_key'}, {'id': 11600, 'synset': 'telemeter.n.01', 'name': 'telemeter'}, {'id': 11601, 'synset': 'telephone_bell.n.01', 'name': 'telephone_bell'}, {'id': 11602, 'synset': 'telephone_cord.n.01', 'name': 'telephone_cord'}, {'id': 11603, 'synset': 'telephone_jack.n.01', 'name': 'telephone_jack'}, {'id': 11604, 'synset': 'telephone_line.n.02', 'name': 'telephone_line'}, {'id': 11605, 'synset': 'telephone_plug.n.01', 'name': 'telephone_plug'}, {'id': 11606, 'synset': 'telephone_receiver.n.01', 'name': 'telephone_receiver'}, {'id': 11607, 'synset': 'telephone_system.n.01', 'name': 'telephone_system'}, {'id': 11608, 'synset': 'telephone_wire.n.01', 'name': 'telephone_wire'}, {'id': 11609, 'synset': 'teleprompter.n.01', 'name': 'Teleprompter'}, {'id': 11610, 'synset': 'telescope.n.01', 'name': 'telescope'}, {'id': 11611, 'synset': 'telescopic_sight.n.01', 'name': 'telescopic_sight'}, {'id': 11612, 'synset': 'telethermometer.n.01', 'name': 'telethermometer'}, {'id': 11613, 'synset': 'teletypewriter.n.01', 'name': 'teletypewriter'}, {'id': 11614, 'synset': 'television.n.02', 'name': 'television'}, {'id': 11615, 'synset': 'television_antenna.n.01', 'name': 'television_antenna'}, {'id': 11616, 'synset': 'television_equipment.n.01', 'name': 'television_equipment'}, {'id': 11617, 'synset': 'television_monitor.n.01', 'name': 'television_monitor'}, {'id': 11618, 'synset': 'television_room.n.01', 'name': 'television_room'}, {'id': 11619, 'synset': 'television_transmitter.n.01', 'name': 'television_transmitter'}, {'id': 11620, 'synset': 'telpher.n.01', 'name': 'telpher'}, {'id': 11621, 'synset': 'telpherage.n.01', 'name': 'telpherage'}, {'id': 11622, 'synset': 'tempera.n.01', 'name': 'tempera'}, {'id': 11623, 'synset': 'temple.n.01', 'name': 'temple'}, {'id': 11624, 'synset': 'temple.n.03', 'name': 'temple'}, {'id': 11625, 'synset': 'temporary_hookup.n.01', 'name': 'temporary_hookup'}, {'id': 11626, 'synset': 'tender.n.06', 'name': 'tender'}, {'id': 11627, 'synset': 'tender.n.05', 'name': 'tender'}, {'id': 11628, 'synset': 'tender.n.04', 'name': 'tender'}, {'id': 11629, 'synset': 'tenement.n.01', 'name': 'tenement'}, {'id': 11630, 'synset': 'tennis_camp.n.01', 'name': 'tennis_camp'}, {'id': 11631, 'synset': 'tenon.n.01', 'name': 'tenon'}, {'id': 11632, 'synset': 'tenor_drum.n.01', 'name': 'tenor_drum'}, {'id': 11633, 'synset': 'tenoroon.n.01', 'name': 'tenoroon'}, {'id': 11634, 'synset': 'tenpenny_nail.n.01', 'name': 'tenpenny_nail'}, {'id': 11635, 'synset': 'tenpin.n.01', 'name': 'tenpin'}, {'id': 11636, 'synset': 'tensimeter.n.01', 'name': 'tensimeter'}, {'id': 11637, 'synset': 
'tensiometer.n.03', 'name': 'tensiometer'}, {'id': 11638, 'synset': 'tensiometer.n.02', 'name': 'tensiometer'}, {'id': 11639, 'synset': 'tensiometer.n.01', 'name': 'tensiometer'}, {'id': 11640, 'synset': 'tent.n.01', 'name': 'tent'}, {'id': 11641, 'synset': 'tenter.n.01', 'name': 'tenter'}, {'id': 11642, 'synset': 'tenterhook.n.01', 'name': 'tenterhook'}, {'id': 11643, 'synset': 'tent-fly.n.01', 'name': 'tent-fly'}, {'id': 11644, 'synset': 'tent_peg.n.01', 'name': 'tent_peg'}, {'id': 11645, 'synset': 'tepee.n.01', 'name': 'tepee'}, {'id': 11646, 'synset': 'terminal.n.02', 'name': 'terminal'}, {'id': 11647, 'synset': 'terminal.n.04', 'name': 'terminal'}, {'id': 11648, 'synset': 'terraced_house.n.01', 'name': 'terraced_house'}, {'id': 11649, 'synset': 'terra_cotta.n.01', 'name': 'terra_cotta'}, {'id': 11650, 'synset': 'terrarium.n.01', 'name': 'terrarium'}, {'id': 11651, 'synset': 'terra_sigillata.n.01', 'name': 'terra_sigillata'}, {'id': 11652, 'synset': 'terry.n.02', 'name': 'terry'}, {'id': 11653, 'synset': 'tesla_coil.n.01', 'name': 'Tesla_coil'}, {'id': 11654, 'synset': 'tessera.n.01', 'name': 'tessera'}, {'id': 11655, 'synset': 'test_equipment.n.01', 'name': 'test_equipment'}, {'id': 11656, 'synset': 'test_rocket.n.01', 'name': 'test_rocket'}, {'id': 11657, 'synset': 'test_room.n.01', 'name': 'test_room'}, {'id': 11658, 'synset': 'testudo.n.01', 'name': 'testudo'}, {'id': 11659, 'synset': 'tetraskelion.n.01', 'name': 'tetraskelion'}, {'id': 11660, 'synset': 'tetrode.n.01', 'name': 'tetrode'}, {'id': 11661, 'synset': 'textile_machine.n.01', 'name': 'textile_machine'}, {'id': 11662, 'synset': 'textile_mill.n.01', 'name': 'textile_mill'}, {'id': 11663, 'synset': 'thatch.n.04', 'name': 'thatch'}, {'id': 11664, 'synset': 'theater.n.01', 'name': 'theater'}, {'id': 11665, 'synset': 'theater_curtain.n.01', 'name': 'theater_curtain'}, {'id': 11666, 'synset': 'theater_light.n.01', 'name': 'theater_light'}, {'id': 11667, 'synset': 'theodolite.n.01', 'name': 'theodolite'}, {'id': 11668, 'synset': 'theremin.n.01', 'name': 'theremin'}, {'id': 11669, 'synset': 'thermal_printer.n.01', 'name': 'thermal_printer'}, {'id': 11670, 'synset': 'thermal_reactor.n.01', 'name': 'thermal_reactor'}, {'id': 11671, 'synset': 'thermocouple.n.01', 'name': 'thermocouple'}, {'id': 11672, 'synset': 'thermoelectric_thermometer.n.01', 'name': 'thermoelectric_thermometer'}, {'id': 11673, 'synset': 'thermograph.n.02', 'name': 'thermograph'}, {'id': 11674, 'synset': 'thermograph.n.01', 'name': 'thermograph'}, {'id': 11675, 'synset': 'thermohydrometer.n.01', 'name': 'thermohydrometer'}, {'id': 11676, 'synset': 'thermojunction.n.01', 'name': 'thermojunction'}, {'id': 11677, 'synset': 'thermonuclear_reactor.n.01', 'name': 'thermonuclear_reactor'}, {'id': 11678, 'synset': 'thermopile.n.01', 'name': 'thermopile'}, {'id': 11679, 'synset': 'thigh_pad.n.01', 'name': 'thigh_pad'}, {'id': 11680, 'synset': 'thill.n.01', 'name': 'thill'}, {'id': 11681, 'synset': 'thinning_shears.n.01', 'name': 'thinning_shears'}, {'id': 11682, 'synset': 'third_base.n.01', 'name': 'third_base'}, {'id': 11683, 'synset': 'third_gear.n.01', 'name': 'third_gear'}, {'id': 11684, 'synset': 'third_rail.n.01', 'name': 'third_rail'}, {'id': 11685, 'synset': 'thong.n.03', 'name': 'thong'}, {'id': 11686, 'synset': 'thong.n.02', 'name': 'thong'}, {'id': 11687, 'synset': 'three-centered_arch.n.01', 'name': 'three-centered_arch'}, {'id': 11688, 'synset': 'three-decker.n.02', 'name': 'three-decker'}, {'id': 11689, 'synset': 'three-dimensional_radar.n.01', 'name': 
'three-dimensional_radar'}, {'id': 11690, 'synset': 'three-piece_suit.n.01', 'name': 'three-piece_suit'}, {'id': 11691, 'synset': 'three-quarter_binding.n.01', 'name': 'three-quarter_binding'}, {'id': 11692, 'synset': 'three-way_switch.n.01', 'name': 'three-way_switch'}, {'id': 11693, 'synset': 'thresher.n.01', 'name': 'thresher'}, {'id': 11694, 'synset': 'threshing_floor.n.01', 'name': 'threshing_floor'}, {'id': 11695, 'synset': 'thriftshop.n.01', 'name': 'thriftshop'}, {'id': 11696, 'synset': 'throat_protector.n.01', 'name': 'throat_protector'}, {'id': 11697, 'synset': 'throne.n.01', 'name': 'throne'}, {'id': 11698, 'synset': 'thrust_bearing.n.01', 'name': 'thrust_bearing'}, {'id': 11699, 'synset': 'thruster.n.02', 'name': 'thruster'}, {'id': 11700, 'synset': 'thumb.n.02', 'name': 'thumb'}, {'id': 11701, 'synset': 'thumbhole.n.02', 'name': 'thumbhole'}, {'id': 11702, 'synset': 'thumbscrew.n.02', 'name': 'thumbscrew'}, {'id': 11703, 'synset': 'thumbstall.n.01', 'name': 'thumbstall'}, {'id': 11704, 'synset': 'thunderer.n.02', 'name': 'thunderer'}, {'id': 11705, 'synset': 'thwart.n.01', 'name': 'thwart'}, {'id': 11706, 'synset': 'ticking.n.02', 'name': 'ticking'}, {'id': 11707, 'synset': 'tickler_coil.n.01', 'name': 'tickler_coil'}, {'id': 11708, 'synset': 'tie.n.04', 'name': 'tie'}, {'id': 11709, 'synset': 'tie.n.08', 'name': 'tie'}, {'id': 11710, 'synset': 'tie_rack.n.01', 'name': 'tie_rack'}, {'id': 11711, 'synset': 'tie_rod.n.01', 'name': 'tie_rod'}, {'id': 11712, 'synset': 'tile.n.01', 'name': 'tile'}, {'id': 11713, 'synset': 'tile_cutter.n.01', 'name': 'tile_cutter'}, {'id': 11714, 'synset': 'tile_roof.n.01', 'name': 'tile_roof'}, {'id': 11715, 'synset': 'tiller.n.03', 'name': 'tiller'}, {'id': 11716, 'synset': 'tilter.n.02', 'name': 'tilter'}, {'id': 11717, 'synset': 'tilt-top_table.n.01', 'name': 'tilt-top_table'}, {'id': 11718, 'synset': 'timber.n.02', 'name': 'timber'}, {'id': 11719, 'synset': 'timber.n.03', 'name': 'timber'}, {'id': 11720, 'synset': 'timber_hitch.n.01', 'name': 'timber_hitch'}, {'id': 11721, 'synset': 'timbrel.n.01', 'name': 'timbrel'}, {'id': 11722, 'synset': 'time_bomb.n.02', 'name': 'time_bomb'}, {'id': 11723, 'synset': 'time_capsule.n.01', 'name': 'time_capsule'}, {'id': 11724, 'synset': 'time_clock.n.01', 'name': 'time_clock'}, {'id': 11725, 'synset': 'time-delay_measuring_instrument.n.01', 'name': 'time-delay_measuring_instrument'}, {'id': 11726, 'synset': 'time-fuse.n.01', 'name': 'time-fuse'}, {'id': 11727, 'synset': 'timepiece.n.01', 'name': 'timepiece'}, {'id': 11728, 'synset': 'timer.n.03', 'name': 'timer'}, {'id': 11729, 'synset': 'time-switch.n.01', 'name': 'time-switch'}, {'id': 11730, 'synset': 'tin.n.02', 'name': 'tin'}, {'id': 11731, 'synset': 'tinderbox.n.02', 'name': 'tinderbox'}, {'id': 11732, 'synset': 'tine.n.01', 'name': 'tine'}, {'id': 11733, 'synset': 'tippet.n.01', 'name': 'tippet'}, {'id': 11734, 'synset': 'tire_chain.n.01', 'name': 'tire_chain'}, {'id': 11735, 'synset': 'tire_iron.n.01', 'name': 'tire_iron'}, {'id': 11736, 'synset': 'titfer.n.01', 'name': 'titfer'}, {'id': 11737, 'synset': 'tithe_barn.n.01', 'name': 'tithe_barn'}, {'id': 11738, 'synset': 'titrator.n.01', 'name': 'titrator'}, {'id': 11739, 'synset': 'toasting_fork.n.01', 'name': 'toasting_fork'}, {'id': 11740, 'synset': 'toastrack.n.01', 'name': 'toastrack'}, {'id': 11741, 'synset': 'tobacco_pouch.n.01', 'name': 'tobacco_pouch'}, {'id': 11742, 'synset': 'tobacco_shop.n.01', 'name': 'tobacco_shop'}, {'id': 11743, 'synset': 'toboggan.n.01', 'name': 'toboggan'}, {'id': 
11744, 'synset': 'toby.n.01', 'name': 'toby'}, {'id': 11745, 'synset': 'tocsin.n.02', 'name': 'tocsin'}, {'id': 11746, 'synset': 'toe.n.02', 'name': 'toe'}, {'id': 11747, 'synset': 'toecap.n.01', 'name': 'toecap'}, {'id': 11748, 'synset': 'toehold.n.02', 'name': 'toehold'}, {'id': 11749, 'synset': 'toga.n.01', 'name': 'toga'}, {'id': 11750, 'synset': 'toga_virilis.n.01', 'name': 'toga_virilis'}, {'id': 11751, 'synset': 'toggle.n.03', 'name': 'toggle'}, {'id': 11752, 'synset': 'toggle_bolt.n.01', 'name': 'toggle_bolt'}, {'id': 11753, 'synset': 'toggle_joint.n.01', 'name': 'toggle_joint'}, {'id': 11754, 'synset': 'toggle_switch.n.01', 'name': 'toggle_switch'}, {'id': 11755, 'synset': 'togs.n.01', 'name': 'togs'}, {'id': 11756, 'synset': 'toilet.n.01', 'name': 'toilet'}, {'id': 11757, 'synset': 'toilet_bag.n.01', 'name': 'toilet_bag'}, {'id': 11758, 'synset': 'toilet_bowl.n.01', 'name': 'toilet_bowl'}, {'id': 11759, 'synset': 'toilet_kit.n.01', 'name': 'toilet_kit'}, {'id': 11760, 'synset': 'toilet_powder.n.01', 'name': 'toilet_powder'}, {'id': 11761, 'synset': 'toiletry.n.01', 'name': 'toiletry'}, {'id': 11762, 'synset': 'toilet_seat.n.01', 'name': 'toilet_seat'}, {'id': 11763, 'synset': 'toilet_water.n.01', 'name': 'toilet_water'}, {'id': 11764, 'synset': 'tokamak.n.01', 'name': 'tokamak'}, {'id': 11765, 'synset': 'token.n.03', 'name': 'token'}, {'id': 11766, 'synset': 'tollbooth.n.01', 'name': 'tollbooth'}, {'id': 11767, 'synset': 'toll_bridge.n.01', 'name': 'toll_bridge'}, {'id': 11768, 'synset': 'tollgate.n.01', 'name': 'tollgate'}, {'id': 11769, 'synset': 'toll_line.n.01', 'name': 'toll_line'}, {'id': 11770, 'synset': 'tomahawk.n.01', 'name': 'tomahawk'}, {'id': 11771, 'synset': 'tommy_gun.n.01', 'name': 'Tommy_gun'}, {'id': 11772, 'synset': 'tomograph.n.01', 'name': 'tomograph'}, {'id': 11773, 'synset': 'tone_arm.n.01', 'name': 'tone_arm'}, {'id': 11774, 'synset': 'toner.n.03', 'name': 'toner'}, {'id': 11775, 'synset': 'tongue.n.07', 'name': 'tongue'}, {'id': 11776, 'synset': 'tongue_and_groove_joint.n.01', 'name': 'tongue_and_groove_joint'}, {'id': 11777, 'synset': 'tongue_depressor.n.01', 'name': 'tongue_depressor'}, {'id': 11778, 'synset': 'tonometer.n.01', 'name': 'tonometer'}, {'id': 11779, 'synset': 'tool.n.01', 'name': 'tool'}, {'id': 11780, 'synset': 'tool_bag.n.01', 'name': 'tool_bag'}, {'id': 11781, 'synset': 'toolshed.n.01', 'name': 'toolshed'}, {'id': 11782, 'synset': 'tooth.n.02', 'name': 'tooth'}, {'id': 11783, 'synset': 'tooth.n.05', 'name': 'tooth'}, {'id': 11784, 'synset': 'top.n.10', 'name': 'top'}, {'id': 11785, 'synset': 'topgallant.n.02', 'name': 'topgallant'}, {'id': 11786, 'synset': 'topgallant.n.01', 'name': 'topgallant'}, {'id': 11787, 'synset': 'topiary.n.01', 'name': 'topiary'}, {'id': 11788, 'synset': 'topknot.n.01', 'name': 'topknot'}, {'id': 11789, 'synset': 'topmast.n.01', 'name': 'topmast'}, {'id': 11790, 'synset': 'topper.n.05', 'name': 'topper'}, {'id': 11791, 'synset': 'topsail.n.01', 'name': 'topsail'}, {'id': 11792, 'synset': 'toque.n.01', 'name': 'toque'}, {'id': 11793, 'synset': 'torch.n.01', 'name': 'torch'}, {'id': 11794, 'synset': 'torpedo.n.06', 'name': 'torpedo'}, {'id': 11795, 'synset': 'torpedo.n.05', 'name': 'torpedo'}, {'id': 11796, 'synset': 'torpedo.n.03', 'name': 'torpedo'}, {'id': 11797, 'synset': 'torpedo_boat.n.01', 'name': 'torpedo_boat'}, {'id': 11798, 'synset': 'torpedo-boat_destroyer.n.01', 'name': 'torpedo-boat_destroyer'}, {'id': 11799, 'synset': 'torpedo_tube.n.01', 'name': 'torpedo_tube'}, {'id': 11800, 'synset': 
'torque_converter.n.01', 'name': 'torque_converter'}, {'id': 11801, 'synset': 'torque_wrench.n.01', 'name': 'torque_wrench'}, {'id': 11802, 'synset': 'torture_chamber.n.01', 'name': 'torture_chamber'}, {'id': 11803, 'synset': 'totem_pole.n.01', 'name': 'totem_pole'}, {'id': 11804, 'synset': 'touch_screen.n.01', 'name': 'touch_screen'}, {'id': 11805, 'synset': 'toupee.n.01', 'name': 'toupee'}, {'id': 11806, 'synset': 'touring_car.n.01', 'name': 'touring_car'}, {'id': 11807, 'synset': 'tourist_class.n.01', 'name': 'tourist_class'}, {'id': 11808, 'synset': 'toweling.n.01', 'name': 'toweling'}, {'id': 11809, 'synset': 'towel_rail.n.01', 'name': 'towel_rail'}, {'id': 11810, 'synset': 'tower.n.01', 'name': 'tower'}, {'id': 11811, 'synset': 'town_hall.n.01', 'name': 'town_hall'}, {'id': 11812, 'synset': 'towpath.n.01', 'name': 'towpath'}, {'id': 11813, 'synset': 'toy_box.n.01', 'name': 'toy_box'}, {'id': 11814, 'synset': 'toyshop.n.01', 'name': 'toyshop'}, {'id': 11815, 'synset': 'trace_detector.n.01', 'name': 'trace_detector'}, {'id': 11816, 'synset': 'track.n.09', 'name': 'track'}, {'id': 11817, 'synset': 'track.n.08', 'name': 'track'}, {'id': 11818, 'synset': 'trackball.n.01', 'name': 'trackball'}, {'id': 11819, 'synset': 'tracked_vehicle.n.01', 'name': 'tracked_vehicle'}, {'id': 11820, 'synset': 'tract_house.n.01', 'name': 'tract_house'}, {'id': 11821, 'synset': 'tract_housing.n.01', 'name': 'tract_housing'}, {'id': 11822, 'synset': 'traction_engine.n.01', 'name': 'traction_engine'}, {'id': 11823, 'synset': 'tractor.n.02', 'name': 'tractor'}, {'id': 11824, 'synset': 'trailer.n.04', 'name': 'trailer'}, {'id': 11825, 'synset': 'trailer.n.03', 'name': 'trailer'}, {'id': 11826, 'synset': 'trailer_camp.n.01', 'name': 'trailer_camp'}, {'id': 11827, 'synset': 'trailing_edge.n.01', 'name': 'trailing_edge'}, {'id': 11828, 'synset': 'tramline.n.01', 'name': 'tramline'}, {'id': 11829, 'synset': 'trammel.n.02', 'name': 'trammel'}, {'id': 11830, 'synset': 'tramp_steamer.n.01', 'name': 'tramp_steamer'}, {'id': 11831, 'synset': 'tramway.n.01', 'name': 'tramway'}, {'id': 11832, 'synset': 'transdermal_patch.n.01', 'name': 'transdermal_patch'}, {'id': 11833, 'synset': 'transept.n.01', 'name': 'transept'}, {'id': 11834, 'synset': 'transformer.n.01', 'name': 'transformer'}, {'id': 11835, 'synset': 'transistor.n.01', 'name': 'transistor'}, {'id': 11836, 'synset': 'transit_instrument.n.01', 'name': 'transit_instrument'}, {'id': 11837, 'synset': 'transmission.n.05', 'name': 'transmission'}, {'id': 11838, 'synset': 'transmission_shaft.n.01', 'name': 'transmission_shaft'}, {'id': 11839, 'synset': 'transmitter.n.03', 'name': 'transmitter'}, {'id': 11840, 'synset': 'transom.n.02', 'name': 'transom'}, {'id': 11841, 'synset': 'transom.n.01', 'name': 'transom'}, {'id': 11842, 'synset': 'transponder.n.01', 'name': 'transponder'}, {'id': 11843, 'synset': 'transporter.n.02', 'name': 'transporter'}, {'id': 11844, 'synset': 'transporter.n.01', 'name': 'transporter'}, {'id': 11845, 'synset': 'transport_ship.n.01', 'name': 'transport_ship'}, {'id': 11846, 'synset': 'trap.n.01', 'name': 'trap'}, {'id': 11847, 'synset': 'trap_door.n.01', 'name': 'trap_door'}, {'id': 11848, 'synset': 'trapeze.n.01', 'name': 'trapeze'}, {'id': 11849, 'synset': 'trave.n.01', 'name': 'trave'}, {'id': 11850, 'synset': 'travel_iron.n.01', 'name': 'travel_iron'}, {'id': 11851, 'synset': 'trawl.n.02', 'name': 'trawl'}, {'id': 11852, 'synset': 'trawl.n.01', 'name': 'trawl'}, {'id': 11853, 'synset': 'trawler.n.02', 'name': 'trawler'}, {'id': 11854, 
'synset': 'tray_cloth.n.01', 'name': 'tray_cloth'}, {'id': 11855, 'synset': 'tread.n.04', 'name': 'tread'}, {'id': 11856, 'synset': 'tread.n.03', 'name': 'tread'}, {'id': 11857, 'synset': 'treadmill.n.02', 'name': 'treadmill'}, {'id': 11858, 'synset': 'treadmill.n.01', 'name': 'treadmill'}, {'id': 11859, 'synset': 'treasure_chest.n.01', 'name': 'treasure_chest'}, {'id': 11860, 'synset': 'treasure_ship.n.01', 'name': 'treasure_ship'}, {'id': 11861, 'synset': 'treenail.n.01', 'name': 'treenail'}, {'id': 11862, 'synset': 'trefoil_arch.n.01', 'name': 'trefoil_arch'}, {'id': 11863, 'synset': 'trellis.n.01', 'name': 'trellis'}, {'id': 11864, 'synset': 'trench.n.01', 'name': 'trench'}, {'id': 11865, 'synset': 'trench_knife.n.01', 'name': 'trench_knife'}, {'id': 11866, 'synset': 'trepan.n.02', 'name': 'trepan'}, {'id': 11867, 'synset': 'trepan.n.01', 'name': 'trepan'}, {'id': 11868, 'synset': 'trestle.n.02', 'name': 'trestle'}, {'id': 11869, 'synset': 'trestle.n.01', 'name': 'trestle'}, {'id': 11870, 'synset': 'trestle_bridge.n.01', 'name': 'trestle_bridge'}, {'id': 11871, 'synset': 'trestle_table.n.01', 'name': 'trestle_table'}, {'id': 11872, 'synset': 'trestlework.n.01', 'name': 'trestlework'}, {'id': 11873, 'synset': 'trews.n.01', 'name': 'trews'}, {'id': 11874, 'synset': 'trial_balloon.n.02', 'name': 'trial_balloon'}, {'id': 11875, 'synset': 'triangle.n.04', 'name': 'triangle'}, {'id': 11876, 'synset': 'triclinium.n.02', 'name': 'triclinium'}, {'id': 11877, 'synset': 'triclinium.n.01', 'name': 'triclinium'}, {'id': 11878, 'synset': 'tricorn.n.01', 'name': 'tricorn'}, {'id': 11879, 'synset': 'tricot.n.01', 'name': 'tricot'}, {'id': 11880, 'synset': 'trident.n.01', 'name': 'trident'}, {'id': 11881, 'synset': 'trigger.n.02', 'name': 'trigger'}, {'id': 11882, 'synset': 'trimaran.n.01', 'name': 'trimaran'}, {'id': 11883, 'synset': 'trimmer.n.02', 'name': 'trimmer'}, {'id': 11884, 'synset': 'trimmer_arch.n.01', 'name': 'trimmer_arch'}, {'id': 11885, 'synset': 'triode.n.01', 'name': 'triode'}, {'id': 11886, 'synset': 'triptych.n.01', 'name': 'triptych'}, {'id': 11887, 'synset': 'trip_wire.n.02', 'name': 'trip_wire'}, {'id': 11888, 'synset': 'trireme.n.01', 'name': 'trireme'}, {'id': 11889, 'synset': 'triskelion.n.01', 'name': 'triskelion'}, {'id': 11890, 'synset': 'triumphal_arch.n.01', 'name': 'triumphal_arch'}, {'id': 11891, 'synset': 'trivet.n.02', 'name': 'trivet'}, {'id': 11892, 'synset': 'trivet.n.01', 'name': 'trivet'}, {'id': 11893, 'synset': 'troika.n.01', 'name': 'troika'}, {'id': 11894, 'synset': 'troll.n.03', 'name': 'troll'}, {'id': 11895, 'synset': 'trolleybus.n.01', 'name': 'trolleybus'}, {'id': 11896, 'synset': 'trombone.n.01', 'name': 'trombone'}, {'id': 11897, 'synset': 'troop_carrier.n.01', 'name': 'troop_carrier'}, {'id': 11898, 'synset': 'troopship.n.01', 'name': 'troopship'}, {'id': 11899, 'synset': 'trophy_case.n.01', 'name': 'trophy_case'}, {'id': 11900, 'synset': 'trough.n.05', 'name': 'trough'}, {'id': 11901, 'synset': 'trouser.n.02', 'name': 'trouser'}, {'id': 11902, 'synset': 'trouser_cuff.n.01', 'name': 'trouser_cuff'}, {'id': 11903, 'synset': 'trouser_press.n.01', 'name': 'trouser_press'}, {'id': 11904, 'synset': 'trousseau.n.01', 'name': 'trousseau'}, {'id': 11905, 'synset': 'trowel.n.01', 'name': 'trowel'}, {'id': 11906, 'synset': 'trumpet_arch.n.01', 'name': 'trumpet_arch'}, {'id': 11907, 'synset': 'truncheon.n.01', 'name': 'truncheon'}, {'id': 11908, 'synset': 'trundle_bed.n.01', 'name': 'trundle_bed'}, {'id': 11909, 'synset': 'trunk_hose.n.01', 'name': 
'trunk_hose'}, {'id': 11910, 'synset': 'trunk_lid.n.01', 'name': 'trunk_lid'}, {'id': 11911, 'synset': 'trunk_line.n.02', 'name': 'trunk_line'}, {'id': 11912, 'synset': 'truss.n.02', 'name': 'truss'}, {'id': 11913, 'synset': 'truss_bridge.n.01', 'name': 'truss_bridge'}, {'id': 11914, 'synset': 'try_square.n.01', 'name': 'try_square'}, {'id': 11915, 'synset': 't-square.n.01', 'name': 'T-square'}, {'id': 11916, 'synset': 'tube.n.02', 'name': 'tube'}, {'id': 11917, 'synset': 'tuck_box.n.01', 'name': 'tuck_box'}, {'id': 11918, 'synset': 'tucker.n.04', 'name': 'tucker'}, {'id': 11919, 'synset': 'tucker-bag.n.01', 'name': 'tucker-bag'}, {'id': 11920, 'synset': 'tuck_shop.n.01', 'name': 'tuck_shop'}, {'id': 11921, 'synset': 'tudor_arch.n.01', 'name': 'Tudor_arch'}, {'id': 11922, 'synset': 'tudung.n.01', 'name': 'tudung'}, {'id': 11923, 'synset': 'tugboat.n.01', 'name': 'tugboat'}, {'id': 11924, 'synset': 'tulle.n.01', 'name': 'tulle'}, {'id': 11925, 'synset': 'tumble-dryer.n.01', 'name': 'tumble-dryer'}, {'id': 11926, 'synset': 'tumbler.n.02', 'name': 'tumbler'}, {'id': 11927, 'synset': 'tumbrel.n.01', 'name': 'tumbrel'}, {'id': 11928, 'synset': 'tun.n.01', 'name': 'tun'}, {'id': 11929, 'synset': 'tunic.n.02', 'name': 'tunic'}, {'id': 11930, 'synset': 'tuning_fork.n.01', 'name': 'tuning_fork'}, {'id': 11931, 'synset': 'tupik.n.01', 'name': 'tupik'}, {'id': 11932, 'synset': 'turbine.n.01', 'name': 'turbine'}, {'id': 11933, 'synset': 'turbogenerator.n.01', 'name': 'turbogenerator'}, {'id': 11934, 'synset': 'tureen.n.01', 'name': 'tureen'}, {'id': 11935, 'synset': 'turkish_bath.n.01', 'name': 'Turkish_bath'}, {'id': 11936, 'synset': 'turkish_towel.n.01', 'name': 'Turkish_towel'}, {'id': 11937, 'synset': "turk's_head.n.01", 'name': "Turk's_head"}, {'id': 11938, 'synset': 'turnbuckle.n.01', 'name': 'turnbuckle'}, {'id': 11939, 'synset': 'turner.n.08', 'name': 'turner'}, {'id': 11940, 'synset': 'turnery.n.01', 'name': 'turnery'}, {'id': 11941, 'synset': 'turnpike.n.01', 'name': 'turnpike'}, {'id': 11942, 'synset': 'turnspit.n.01', 'name': 'turnspit'}, {'id': 11943, 'synset': 'turnstile.n.01', 'name': 'turnstile'}, {'id': 11944, 'synset': 'turntable.n.01', 'name': 'turntable'}, {'id': 11945, 'synset': 'turntable.n.02', 'name': 'turntable'}, {'id': 11946, 'synset': 'turret.n.01', 'name': 'turret'}, {'id': 11947, 'synset': 'turret_clock.n.01', 'name': 'turret_clock'}, {'id': 11948, 'synset': 'tweed.n.01', 'name': 'tweed'}, {'id': 11949, 'synset': 'tweeter.n.01', 'name': 'tweeter'}, {'id': 11950, 'synset': 'twenty-two.n.02', 'name': 'twenty-two'}, {'id': 11951, 'synset': 'twenty-two_pistol.n.01', 'name': 'twenty-two_pistol'}, {'id': 11952, 'synset': 'twenty-two_rifle.n.01', 'name': 'twenty-two_rifle'}, {'id': 11953, 'synset': 'twill.n.02', 'name': 'twill'}, {'id': 11954, 'synset': 'twill.n.01', 'name': 'twill'}, {'id': 11955, 'synset': 'twin_bed.n.01', 'name': 'twin_bed'}, {'id': 11956, 'synset': 'twinjet.n.01', 'name': 'twinjet'}, {'id': 11957, 'synset': 'twist_bit.n.01', 'name': 'twist_bit'}, {'id': 11958, 'synset': 'two-by-four.n.01', 'name': 'two-by-four'}, {'id': 11959, 'synset': 'two-man_tent.n.01', 'name': 'two-man_tent'}, {'id': 11960, 'synset': 'two-piece.n.01', 'name': 'two-piece'}, {'id': 11961, 'synset': 'typesetting_machine.n.01', 'name': 'typesetting_machine'}, {'id': 11962, 'synset': 'typewriter_carriage.n.01', 'name': 'typewriter_carriage'}, {'id': 11963, 'synset': 'typewriter_keyboard.n.01', 'name': 'typewriter_keyboard'}, {'id': 11964, 'synset': 'tyrolean.n.02', 'name': 'tyrolean'}, 
{'id': 11965, 'synset': 'uke.n.01', 'name': 'uke'}, {'id': 11966, 'synset': 'ulster.n.02', 'name': 'ulster'}, {'id': 11967, 'synset': 'ultracentrifuge.n.01', 'name': 'ultracentrifuge'}, {'id': 11968, 'synset': 'ultramicroscope.n.01', 'name': 'ultramicroscope'}, {'id': 11969, 'synset': 'ultrasuede.n.01', 'name': 'Ultrasuede'}, {'id': 11970, 'synset': 'ultraviolet_lamp.n.01', 'name': 'ultraviolet_lamp'}, {'id': 11971, 'synset': 'umbrella_tent.n.01', 'name': 'umbrella_tent'}, {'id': 11972, 'synset': 'undercarriage.n.01', 'name': 'undercarriage'}, {'id': 11973, 'synset': 'undercoat.n.01', 'name': 'undercoat'}, {'id': 11974, 'synset': 'undergarment.n.01', 'name': 'undergarment'}, {'id': 11975, 'synset': 'underpants.n.01', 'name': 'underpants'}, {'id': 11976, 'synset': 'undies.n.01', 'name': 'undies'}, {'id': 11977, 'synset': 'uneven_parallel_bars.n.01', 'name': 'uneven_parallel_bars'}, {'id': 11978, 'synset': 'uniform.n.01', 'name': 'uniform'}, {'id': 11979, 'synset': 'universal_joint.n.01', 'name': 'universal_joint'}, {'id': 11980, 'synset': 'university.n.02', 'name': 'university'}, {'id': 11981, 'synset': 'upholstery.n.01', 'name': 'upholstery'}, {'id': 11982, 'synset': 'upholstery_material.n.01', 'name': 'upholstery_material'}, {'id': 11983, 'synset': 'upholstery_needle.n.01', 'name': 'upholstery_needle'}, {'id': 11984, 'synset': 'uplift.n.02', 'name': 'uplift'}, {'id': 11985, 'synset': 'upper_berth.n.01', 'name': 'upper_berth'}, {'id': 11986, 'synset': 'upright.n.02', 'name': 'upright'}, {'id': 11987, 'synset': 'upset.n.04', 'name': 'upset'}, {'id': 11988, 'synset': 'upstairs.n.01', 'name': 'upstairs'}, {'id': 11989, 'synset': 'urceole.n.01', 'name': 'urceole'}, {'id': 11990, 'synset': 'urn.n.02', 'name': 'urn'}, {'id': 11991, 'synset': 'used-car.n.01', 'name': 'used-car'}, {'id': 11992, 'synset': 'utensil.n.01', 'name': 'utensil'}, {'id': 11993, 'synset': 'uzi.n.01', 'name': 'Uzi'}, {'id': 11994, 'synset': 'vacation_home.n.01', 'name': 'vacation_home'}, {'id': 11995, 'synset': 'vacuum_chamber.n.01', 'name': 'vacuum_chamber'}, {'id': 11996, 'synset': 'vacuum_flask.n.01', 'name': 'vacuum_flask'}, {'id': 11997, 'synset': 'vacuum_gauge.n.01', 'name': 'vacuum_gauge'}, {'id': 11998, 'synset': 'valenciennes.n.02', 'name': 'Valenciennes'}, {'id': 11999, 'synset': 'valise.n.01', 'name': 'valise'}, {'id': 12000, 'synset': 'valve.n.03', 'name': 'valve'}, {'id': 12001, 'synset': 'valve.n.02', 'name': 'valve'}, {'id': 12002, 'synset': 'valve-in-head_engine.n.01', 'name': 'valve-in-head_engine'}, {'id': 12003, 'synset': 'vambrace.n.01', 'name': 'vambrace'}, {'id': 12004, 'synset': 'van.n.05', 'name': 'van'}, {'id': 12005, 'synset': 'van.n.04', 'name': 'van'}, {'id': 12006, 'synset': 'vane.n.02', 'name': 'vane'}, {'id': 12007, 'synset': 'vaporizer.n.01', 'name': 'vaporizer'}, {'id': 12008, 'synset': 'variable-pitch_propeller.n.01', 'name': 'variable-pitch_propeller'}, {'id': 12009, 'synset': 'variometer.n.01', 'name': 'variometer'}, {'id': 12010, 'synset': 'varnish.n.01', 'name': 'varnish'}, {'id': 12011, 'synset': 'vault.n.03', 'name': 'vault'}, {'id': 12012, 'synset': 'vault.n.02', 'name': 'vault'}, {'id': 12013, 'synset': 'vaulting_horse.n.01', 'name': 'vaulting_horse'}, {'id': 12014, 'synset': 'vehicle.n.01', 'name': 'vehicle'}, {'id': 12015, 'synset': 'velcro.n.01', 'name': 'Velcro'}, {'id': 12016, 'synset': 'velocipede.n.01', 'name': 'velocipede'}, {'id': 12017, 'synset': 'velour.n.01', 'name': 'velour'}, {'id': 12018, 'synset': 'velvet.n.01', 'name': 'velvet'}, {'id': 12019, 'synset': 
'velveteen.n.01', 'name': 'velveteen'}, {'id': 12020, 'synset': 'veneer.n.01', 'name': 'veneer'}, {'id': 12021, 'synset': 'venetian_blind.n.01', 'name': 'Venetian_blind'}, {'id': 12022, 'synset': 'venn_diagram.n.01', 'name': 'Venn_diagram'}, {'id': 12023, 'synset': 'ventilation.n.02', 'name': 'ventilation'}, {'id': 12024, 'synset': 'ventilation_shaft.n.01', 'name': 'ventilation_shaft'}, {'id': 12025, 'synset': 'ventilator.n.01', 'name': 'ventilator'}, {'id': 12026, 'synset': 'veranda.n.01', 'name': 'veranda'}, {'id': 12027, 'synset': 'verdigris.n.02', 'name': 'verdigris'}, {'id': 12028, 'synset': 'vernier_caliper.n.01', 'name': 'vernier_caliper'}, {'id': 12029, 'synset': 'vernier_scale.n.01', 'name': 'vernier_scale'}, {'id': 12030, 'synset': 'vertical_file.n.01', 'name': 'vertical_file'}, {'id': 12031, 'synset': 'vertical_stabilizer.n.01', 'name': 'vertical_stabilizer'}, {'id': 12032, 'synset': 'vertical_tail.n.01', 'name': 'vertical_tail'}, {'id': 12033, 'synset': 'very_pistol.n.01', 'name': 'Very_pistol'}, {'id': 12034, 'synset': 'vessel.n.02', 'name': 'vessel'}, {'id': 12035, 'synset': 'vessel.n.03', 'name': 'vessel'}, {'id': 12036, 'synset': 'vestiture.n.01', 'name': 'vestiture'}, {'id': 12037, 'synset': 'vestment.n.01', 'name': 'vestment'}, {'id': 12038, 'synset': 'vest_pocket.n.01', 'name': 'vest_pocket'}, {'id': 12039, 'synset': 'vestry.n.02', 'name': 'vestry'}, {'id': 12040, 'synset': 'viaduct.n.01', 'name': 'viaduct'}, {'id': 12041, 'synset': 'vibraphone.n.01', 'name': 'vibraphone'}, {'id': 12042, 'synset': 'vibrator.n.02', 'name': 'vibrator'}, {'id': 12043, 'synset': 'vibrator.n.01', 'name': 'vibrator'}, {'id': 12044, 'synset': 'victrola.n.01', 'name': 'Victrola'}, {'id': 12045, 'synset': 'vicuna.n.02', 'name': 'vicuna'}, {'id': 12046, 'synset': 'videocassette.n.01', 'name': 'videocassette'}, {'id': 12047, 'synset': 'videocassette_recorder.n.01', 'name': 'videocassette_recorder'}, {'id': 12048, 'synset': 'videodisk.n.01', 'name': 'videodisk'}, {'id': 12049, 'synset': 'video_recording.n.01', 'name': 'video_recording'}, {'id': 12050, 'synset': 'videotape.n.02', 'name': 'videotape'}, {'id': 12051, 'synset': 'vigil_light.n.01', 'name': 'vigil_light'}, {'id': 12052, 'synset': 'villa.n.04', 'name': 'villa'}, {'id': 12053, 'synset': 'villa.n.03', 'name': 'villa'}, {'id': 12054, 'synset': 'villa.n.02', 'name': 'villa'}, {'id': 12055, 'synset': 'viol.n.01', 'name': 'viol'}, {'id': 12056, 'synset': 'viola.n.03', 'name': 'viola'}, {'id': 12057, 'synset': 'viola_da_braccio.n.01', 'name': 'viola_da_braccio'}, {'id': 12058, 'synset': 'viola_da_gamba.n.01', 'name': 'viola_da_gamba'}, {'id': 12059, 'synset': "viola_d'amore.n.01", 'name': "viola_d'amore"}, {'id': 12060, 'synset': 'virginal.n.01', 'name': 'virginal'}, {'id': 12061, 'synset': 'viscometer.n.01', 'name': 'viscometer'}, {'id': 12062, 'synset': 'viscose_rayon.n.01', 'name': 'viscose_rayon'}, {'id': 12063, 'synset': 'vise.n.01', 'name': 'vise'}, {'id': 12064, 'synset': 'visor.n.01', 'name': 'visor'}, {'id': 12065, 'synset': 'visual_display_unit.n.01', 'name': 'visual_display_unit'}, {'id': 12066, 'synset': 'vivarium.n.01', 'name': 'vivarium'}, {'id': 12067, 'synset': 'viyella.n.01', 'name': 'Viyella'}, {'id': 12068, 'synset': 'voile.n.01', 'name': 'voile'}, {'id': 12069, 'synset': 'volleyball_net.n.01', 'name': 'volleyball_net'}, {'id': 12070, 'synset': 'voltage_regulator.n.01', 'name': 'voltage_regulator'}, {'id': 12071, 'synset': 'voltaic_cell.n.01', 'name': 'voltaic_cell'}, {'id': 12072, 'synset': 'voltaic_pile.n.01', 'name': 
'voltaic_pile'}, {'id': 12073, 'synset': 'voltmeter.n.01', 'name': 'voltmeter'}, {'id': 12074, 'synset': 'vomitory.n.01', 'name': 'vomitory'}, {'id': 12075, 'synset': 'von_neumann_machine.n.01', 'name': 'von_Neumann_machine'}, {'id': 12076, 'synset': 'voting_booth.n.01', 'name': 'voting_booth'}, {'id': 12077, 'synset': 'voting_machine.n.01', 'name': 'voting_machine'}, {'id': 12078, 'synset': 'voussoir.n.01', 'name': 'voussoir'}, {'id': 12079, 'synset': 'vox_angelica.n.01', 'name': 'vox_angelica'}, {'id': 12080, 'synset': 'vox_humana.n.01', 'name': 'vox_humana'}, {'id': 12081, 'synset': 'waders.n.01', 'name': 'waders'}, {'id': 12082, 'synset': 'wading_pool.n.01', 'name': 'wading_pool'}, {'id': 12083, 'synset': 'wagon.n.04', 'name': 'wagon'}, {'id': 12084, 'synset': 'wagon_tire.n.01', 'name': 'wagon_tire'}, {'id': 12085, 'synset': 'wain.n.03', 'name': 'wain'}, {'id': 12086, 'synset': 'wainscot.n.02', 'name': 'wainscot'}, {'id': 12087, 'synset': 'wainscoting.n.01', 'name': 'wainscoting'}, {'id': 12088, 'synset': 'waist_pack.n.01', 'name': 'waist_pack'}, {'id': 12089, 'synset': 'walker.n.06', 'name': 'walker'}, {'id': 12090, 'synset': 'walker.n.05', 'name': 'walker'}, {'id': 12091, 'synset': 'walker.n.04', 'name': 'walker'}, {'id': 12092, 'synset': 'walkie-talkie.n.01', 'name': 'walkie-talkie'}, {'id': 12093, 'synset': 'walk-in.n.04', 'name': 'walk-in'}, {'id': 12094, 'synset': 'walking_shoe.n.01', 'name': 'walking_shoe'}, {'id': 12095, 'synset': 'walkman.n.01', 'name': 'Walkman'}, {'id': 12096, 'synset': 'walk-up_apartment.n.01', 'name': 'walk-up_apartment'}, {'id': 12097, 'synset': 'wall.n.01', 'name': 'wall'}, {'id': 12098, 'synset': 'wall.n.07', 'name': 'wall'}, {'id': 12099, 'synset': 'wall_tent.n.01', 'name': 'wall_tent'}, {'id': 12100, 'synset': 'wall_unit.n.01', 'name': 'wall_unit'}, {'id': 12101, 'synset': 'wand.n.01', 'name': 'wand'}, {'id': 12102, 'synset': 'wankel_engine.n.01', 'name': 'Wankel_engine'}, {'id': 12103, 'synset': 'ward.n.03', 'name': 'ward'}, {'id': 12104, 'synset': 'wardroom.n.01', 'name': 'wardroom'}, {'id': 12105, 'synset': 'warehouse.n.01', 'name': 'warehouse'}, {'id': 12106, 'synset': 'warming_pan.n.01', 'name': 'warming_pan'}, {'id': 12107, 'synset': 'war_paint.n.02', 'name': 'war_paint'}, {'id': 12108, 'synset': 'warplane.n.01', 'name': 'warplane'}, {'id': 12109, 'synset': 'war_room.n.01', 'name': 'war_room'}, {'id': 12110, 'synset': 'warship.n.01', 'name': 'warship'}, {'id': 12111, 'synset': 'wash.n.01', 'name': 'wash'}, {'id': 12112, 'synset': 'wash-and-wear.n.01', 'name': 'wash-and-wear'}, {'id': 12113, 'synset': 'washbasin.n.02', 'name': 'washbasin'}, {'id': 12114, 'synset': 'washboard.n.02', 'name': 'washboard'}, {'id': 12115, 'synset': 'washboard.n.01', 'name': 'washboard'}, {'id': 12116, 'synset': 'washer.n.02', 'name': 'washer'}, {'id': 12117, 'synset': 'washhouse.n.01', 'name': 'washhouse'}, {'id': 12118, 'synset': 'washroom.n.01', 'name': 'washroom'}, {'id': 12119, 'synset': 'washstand.n.01', 'name': 'washstand'}, {'id': 12120, 'synset': 'washtub.n.01', 'name': 'washtub'}, {'id': 12121, 'synset': 'wastepaper_basket.n.01', 'name': 'wastepaper_basket'}, {'id': 12122, 'synset': 'watch_cap.n.01', 'name': 'watch_cap'}, {'id': 12123, 'synset': 'watch_case.n.01', 'name': 'watch_case'}, {'id': 12124, 'synset': 'watch_glass.n.01', 'name': 'watch_glass'}, {'id': 12125, 'synset': 'watchtower.n.01', 'name': 'watchtower'}, {'id': 12126, 'synset': 'water-base_paint.n.01', 'name': 'water-base_paint'}, {'id': 12127, 'synset': 'water_bed.n.01', 'name': 'water_bed'}, 
{'id': 12128, 'synset': 'water_butt.n.01', 'name': 'water_butt'}, {'id': 12129, 'synset': 'water_cart.n.01', 'name': 'water_cart'}, {'id': 12130, 'synset': 'water_chute.n.01', 'name': 'water_chute'}, {'id': 12131, 'synset': 'water_closet.n.01', 'name': 'water_closet'}, {'id': 12132, 'synset': 'watercolor.n.02', 'name': 'watercolor'}, {'id': 12133, 'synset': 'water-cooled_reactor.n.01', 'name': 'water-cooled_reactor'}, {'id': 12134, 'synset': 'water_filter.n.01', 'name': 'water_filter'}, {'id': 12135, 'synset': 'water_gauge.n.01', 'name': 'water_gauge'}, {'id': 12136, 'synset': 'water_glass.n.02', 'name': 'water_glass'}, {'id': 12137, 'synset': 'water_hazard.n.01', 'name': 'water_hazard'}, {'id': 12138, 'synset': 'watering_cart.n.01', 'name': 'watering_cart'}, {'id': 12139, 'synset': 'water_jacket.n.01', 'name': 'water_jacket'}, {'id': 12140, 'synset': 'water_jump.n.01', 'name': 'water_jump'}, {'id': 12141, 'synset': 'water_level.n.04', 'name': 'water_level'}, {'id': 12142, 'synset': 'water_meter.n.01', 'name': 'water_meter'}, {'id': 12143, 'synset': 'water_mill.n.01', 'name': 'water_mill'}, {'id': 12144, 'synset': 'waterproof.n.01', 'name': 'waterproof'}, {'id': 12145, 'synset': 'waterproofing.n.02', 'name': 'waterproofing'}, {'id': 12146, 'synset': 'water_pump.n.01', 'name': 'water_pump'}, {'id': 12147, 'synset': 'waterspout.n.03', 'name': 'waterspout'}, {'id': 12148, 'synset': 'water_wagon.n.01', 'name': 'water_wagon'}, {'id': 12149, 'synset': 'waterwheel.n.02', 'name': 'waterwheel'}, {'id': 12150, 'synset': 'waterwheel.n.01', 'name': 'waterwheel'}, {'id': 12151, 'synset': 'water_wings.n.01', 'name': 'water_wings'}, {'id': 12152, 'synset': 'waterworks.n.02', 'name': 'waterworks'}, {'id': 12153, 'synset': 'wattmeter.n.01', 'name': 'wattmeter'}, {'id': 12154, 'synset': 'waxwork.n.02', 'name': 'waxwork'}, {'id': 12155, 'synset': 'ways.n.01', 'name': 'ways'}, {'id': 12156, 'synset': 'weapon.n.01', 'name': 'weapon'}, {'id': 12157, 'synset': 'weaponry.n.01', 'name': 'weaponry'}, {'id': 12158, 'synset': 'weapons_carrier.n.01', 'name': 'weapons_carrier'}, {'id': 12159, 'synset': 'weathercock.n.01', 'name': 'weathercock'}, {'id': 12160, 'synset': 'weatherglass.n.01', 'name': 'weatherglass'}, {'id': 12161, 'synset': 'weather_satellite.n.01', 'name': 'weather_satellite'}, {'id': 12162, 'synset': 'weather_ship.n.01', 'name': 'weather_ship'}, {'id': 12163, 'synset': 'web.n.02', 'name': 'web'}, {'id': 12164, 'synset': 'web.n.06', 'name': 'web'}, {'id': 12165, 'synset': 'webbing.n.03', 'name': 'webbing'}, {'id': 12166, 'synset': 'wedge.n.06', 'name': 'wedge'}, {'id': 12167, 'synset': 'wedge.n.05', 'name': 'wedge'}, {'id': 12168, 'synset': 'wedgie.n.01', 'name': 'wedgie'}, {'id': 12169, 'synset': 'wedgwood.n.02', 'name': 'Wedgwood'}, {'id': 12170, 'synset': 'weeder.n.02', 'name': 'weeder'}, {'id': 12171, 'synset': 'weeds.n.01', 'name': 'weeds'}, {'id': 12172, 'synset': 'weekender.n.02', 'name': 'weekender'}, {'id': 12173, 'synset': 'weighbridge.n.01', 'name': 'weighbridge'}, {'id': 12174, 'synset': 'weight.n.02', 'name': 'weight'}, {'id': 12175, 'synset': 'weir.n.01', 'name': 'weir'}, {'id': 12176, 'synset': 'weir.n.02', 'name': 'weir'}, {'id': 12177, 'synset': 'welcome_wagon.n.01', 'name': 'welcome_wagon'}, {'id': 12178, 'synset': 'weld.n.03', 'name': 'weld'}, {'id': 12179, 'synset': "welder's_mask.n.01", 'name': "welder's_mask"}, {'id': 12180, 'synset': 'weldment.n.01', 'name': 'weldment'}, {'id': 12181, 'synset': 'well.n.02', 'name': 'well'}, {'id': 12182, 'synset': 'wellhead.n.02', 'name': 
'wellhead'}, {'id': 12183, 'synset': 'welt.n.02', 'name': 'welt'}, {'id': 12184, 'synset': 'weston_cell.n.01', 'name': 'Weston_cell'}, {'id': 12185, 'synset': 'wet_bar.n.01', 'name': 'wet_bar'}, {'id': 12186, 'synset': 'wet-bulb_thermometer.n.01', 'name': 'wet-bulb_thermometer'}, {'id': 12187, 'synset': 'wet_cell.n.01', 'name': 'wet_cell'}, {'id': 12188, 'synset': 'wet_fly.n.01', 'name': 'wet_fly'}, {'id': 12189, 'synset': 'whaleboat.n.01', 'name': 'whaleboat'}, {'id': 12190, 'synset': 'whaler.n.02', 'name': 'whaler'}, {'id': 12191, 'synset': 'whaling_gun.n.01', 'name': 'whaling_gun'}, {'id': 12192, 'synset': 'wheel.n.04', 'name': 'wheel'}, {'id': 12193, 'synset': 'wheel_and_axle.n.01', 'name': 'wheel_and_axle'}, {'id': 12194, 'synset': 'wheeled_vehicle.n.01', 'name': 'wheeled_vehicle'}, {'id': 12195, 'synset': 'wheelwork.n.01', 'name': 'wheelwork'}, {'id': 12196, 'synset': 'wherry.n.02', 'name': 'wherry'}, {'id': 12197, 'synset': 'wherry.n.01', 'name': 'wherry'}, {'id': 12198, 'synset': 'whetstone.n.01', 'name': 'whetstone'}, {'id': 12199, 'synset': 'whiffletree.n.01', 'name': 'whiffletree'}, {'id': 12200, 'synset': 'whip.n.01', 'name': 'whip'}, {'id': 12201, 'synset': 'whipcord.n.02', 'name': 'whipcord'}, {'id': 12202, 'synset': 'whipping_post.n.01', 'name': 'whipping_post'}, {'id': 12203, 'synset': 'whipstitch.n.01', 'name': 'whipstitch'}, {'id': 12204, 'synset': 'whirler.n.02', 'name': 'whirler'}, {'id': 12205, 'synset': 'whisk.n.02', 'name': 'whisk'}, {'id': 12206, 'synset': 'whisk.n.01', 'name': 'whisk'}, {'id': 12207, 'synset': 'whiskey_bottle.n.01', 'name': 'whiskey_bottle'}, {'id': 12208, 'synset': 'whiskey_jug.n.01', 'name': 'whiskey_jug'}, {'id': 12209, 'synset': 'whispering_gallery.n.01', 'name': 'whispering_gallery'}, {'id': 12210, 'synset': 'whistle.n.04', 'name': 'whistle'}, {'id': 12211, 'synset': 'white.n.11', 'name': 'white'}, {'id': 12212, 'synset': 'white_goods.n.01', 'name': 'white_goods'}, {'id': 12213, 'synset': 'whitewash.n.02', 'name': 'whitewash'}, {'id': 12214, 'synset': 'whorehouse.n.01', 'name': 'whorehouse'}, {'id': 12215, 'synset': 'wick.n.02', 'name': 'wick'}, {'id': 12216, 'synset': 'wicker.n.02', 'name': 'wicker'}, {'id': 12217, 'synset': 'wicker_basket.n.01', 'name': 'wicker_basket'}, {'id': 12218, 'synset': 'wicket.n.02', 'name': 'wicket'}, {'id': 12219, 'synset': 'wicket.n.01', 'name': 'wicket'}, {'id': 12220, 'synset': 'wickiup.n.01', 'name': 'wickiup'}, {'id': 12221, 'synset': 'wide-angle_lens.n.01', 'name': 'wide-angle_lens'}, {'id': 12222, 'synset': 'widebody_aircraft.n.01', 'name': 'widebody_aircraft'}, {'id': 12223, 'synset': 'wide_wale.n.01', 'name': 'wide_wale'}, {'id': 12224, 'synset': "widow's_walk.n.01", 'name': "widow's_walk"}, {'id': 12225, 'synset': 'wiffle.n.01', 'name': 'Wiffle'}, {'id': 12226, 'synset': 'wigwam.n.01', 'name': 'wigwam'}, {'id': 12227, 'synset': 'wilton.n.01', 'name': 'Wilton'}, {'id': 12228, 'synset': 'wimple.n.01', 'name': 'wimple'}, {'id': 12229, 'synset': 'wincey.n.01', 'name': 'wincey'}, {'id': 12230, 'synset': 'winceyette.n.01', 'name': 'winceyette'}, {'id': 12231, 'synset': 'winch.n.01', 'name': 'winch'}, {'id': 12232, 'synset': 'winchester.n.02', 'name': 'Winchester'}, {'id': 12233, 'synset': 'windbreak.n.01', 'name': 'windbreak'}, {'id': 12234, 'synset': 'winder.n.02', 'name': 'winder'}, {'id': 12235, 'synset': 'wind_instrument.n.01', 'name': 'wind_instrument'}, {'id': 12236, 'synset': 'windjammer.n.01', 'name': 'windjammer'}, {'id': 12237, 'synset': 'windmill.n.02', 'name': 'windmill'}, {'id': 12238, 'synset': 
'window.n.01', 'name': 'window'}, {'id': 12239, 'synset': 'window.n.08', 'name': 'window'}, {'id': 12240, 'synset': 'window_blind.n.01', 'name': 'window_blind'}, {'id': 12241, 'synset': 'window_envelope.n.01', 'name': 'window_envelope'}, {'id': 12242, 'synset': 'window_frame.n.01', 'name': 'window_frame'}, {'id': 12243, 'synset': 'window_screen.n.01', 'name': 'window_screen'}, {'id': 12244, 'synset': 'window_seat.n.01', 'name': 'window_seat'}, {'id': 12245, 'synset': 'window_shade.n.01', 'name': 'window_shade'}, {'id': 12246, 'synset': 'windowsill.n.01', 'name': 'windowsill'}, {'id': 12247, 'synset': 'windshield.n.01', 'name': 'windshield'}, {'id': 12248, 'synset': 'windsor_chair.n.01', 'name': 'Windsor_chair'}, {'id': 12249, 'synset': 'windsor_knot.n.01', 'name': 'Windsor_knot'}, {'id': 12250, 'synset': 'windsor_tie.n.01', 'name': 'Windsor_tie'}, {'id': 12251, 'synset': 'wind_tee.n.01', 'name': 'wind_tee'}, {'id': 12252, 'synset': 'wind_tunnel.n.01', 'name': 'wind_tunnel'}, {'id': 12253, 'synset': 'wind_turbine.n.01', 'name': 'wind_turbine'}, {'id': 12254, 'synset': 'wine_bar.n.01', 'name': 'wine_bar'}, {'id': 12255, 'synset': 'wine_cask.n.01', 'name': 'wine_cask'}, {'id': 12256, 'synset': 'winepress.n.01', 'name': 'winepress'}, {'id': 12257, 'synset': 'winery.n.01', 'name': 'winery'}, {'id': 12258, 'synset': 'wineskin.n.01', 'name': 'wineskin'}, {'id': 12259, 'synset': 'wing.n.02', 'name': 'wing'}, {'id': 12260, 'synset': 'wing_chair.n.01', 'name': 'wing_chair'}, {'id': 12261, 'synset': 'wing_nut.n.02', 'name': 'wing_nut'}, {'id': 12262, 'synset': 'wing_tip.n.02', 'name': 'wing_tip'}, {'id': 12263, 'synset': 'wing_tip.n.01', 'name': 'wing_tip'}, {'id': 12264, 'synset': 'wiper.n.02', 'name': 'wiper'}, {'id': 12265, 'synset': 'wiper_motor.n.01', 'name': 'wiper_motor'}, {'id': 12266, 'synset': 'wire.n.01', 'name': 'wire'}, {'id': 12267, 'synset': 'wire.n.02', 'name': 'wire'}, {'id': 12268, 'synset': 'wire_cloth.n.01', 'name': 'wire_cloth'}, {'id': 12269, 'synset': 'wire_cutter.n.01', 'name': 'wire_cutter'}, {'id': 12270, 'synset': 'wire_gauge.n.01', 'name': 'wire_gauge'}, {'id': 12271, 'synset': 'wireless_local_area_network.n.01', 'name': 'wireless_local_area_network'}, {'id': 12272, 'synset': 'wire_matrix_printer.n.01', 'name': 'wire_matrix_printer'}, {'id': 12273, 'synset': 'wire_recorder.n.01', 'name': 'wire_recorder'}, {'id': 12274, 'synset': 'wire_stripper.n.01', 'name': 'wire_stripper'}, {'id': 12275, 'synset': 'wirework.n.01', 'name': 'wirework'}, {'id': 12276, 'synset': 'wiring.n.01', 'name': 'wiring'}, {'id': 12277, 'synset': 'wishing_cap.n.01', 'name': 'wishing_cap'}, {'id': 12278, 'synset': 'witness_box.n.01', 'name': 'witness_box'}, {'id': 12279, 'synset': "woman's_clothing.n.01", 'name': "woman's_clothing"}, {'id': 12280, 'synset': 'wood.n.08', 'name': 'wood'}, {'id': 12281, 'synset': 'woodcarving.n.01', 'name': 'woodcarving'}, {'id': 12282, 'synset': 'wood_chisel.n.01', 'name': 'wood_chisel'}, {'id': 12283, 'synset': 'woodenware.n.01', 'name': 'woodenware'}, {'id': 12284, 'synset': 'woodscrew.n.01', 'name': 'woodscrew'}, {'id': 12285, 'synset': 'woodshed.n.01', 'name': 'woodshed'}, {'id': 12286, 'synset': 'wood_vise.n.01', 'name': 'wood_vise'}, {'id': 12287, 'synset': 'woodwind.n.01', 'name': 'woodwind'}, {'id': 12288, 'synset': 'woof.n.01', 'name': 'woof'}, {'id': 12289, 'synset': 'woofer.n.01', 'name': 'woofer'}, {'id': 12290, 'synset': 'wool.n.01', 'name': 'wool'}, {'id': 12291, 'synset': 'workbasket.n.01', 'name': 'workbasket'}, {'id': 12292, 'synset': 'workbench.n.01', 
'name': 'workbench'}, {'id': 12293, 'synset': 'work-clothing.n.01', 'name': 'work-clothing'}, {'id': 12294, 'synset': 'workhouse.n.02', 'name': 'workhouse'}, {'id': 12295, 'synset': 'workhouse.n.01', 'name': 'workhouse'}, {'id': 12296, 'synset': 'workpiece.n.01', 'name': 'workpiece'}, {'id': 12297, 'synset': 'workroom.n.01', 'name': 'workroom'}, {'id': 12298, 'synset': 'works.n.04', 'name': 'works'}, {'id': 12299, 'synset': 'work-shirt.n.01', 'name': 'work-shirt'}, {'id': 12300, 'synset': 'workstation.n.01', 'name': 'workstation'}, {'id': 12301, 'synset': 'worktable.n.01', 'name': 'worktable'}, {'id': 12302, 'synset': 'workwear.n.01', 'name': 'workwear'}, {'id': 12303, 'synset': 'world_wide_web.n.01', 'name': 'World_Wide_Web'}, {'id': 12304, 'synset': 'worm_fence.n.01', 'name': 'worm_fence'}, {'id': 12305, 'synset': 'worm_gear.n.01', 'name': 'worm_gear'}, {'id': 12306, 'synset': 'worm_wheel.n.01', 'name': 'worm_wheel'}, {'id': 12307, 'synset': 'worsted.n.01', 'name': 'worsted'}, {'id': 12308, 'synset': 'worsted.n.02', 'name': 'worsted'}, {'id': 12309, 'synset': 'wrap.n.01', 'name': 'wrap'}, {'id': 12310, 'synset': 'wraparound.n.01', 'name': 'wraparound'}, {'id': 12311, 'synset': 'wrapping.n.01', 'name': 'wrapping'}, {'id': 12312, 'synset': 'wreck.n.04', 'name': 'wreck'}, {'id': 12313, 'synset': 'wrestling_mat.n.01', 'name': 'wrestling_mat'}, {'id': 12314, 'synset': 'wringer.n.01', 'name': 'wringer'}, {'id': 12315, 'synset': 'wrist_pad.n.01', 'name': 'wrist_pad'}, {'id': 12316, 'synset': 'wrist_pin.n.01', 'name': 'wrist_pin'}, {'id': 12317, 'synset': 'wristwatch.n.01', 'name': 'wristwatch'}, {'id': 12318, 'synset': 'writing_arm.n.01', 'name': 'writing_arm'}, {'id': 12319, 'synset': 'writing_desk.n.02', 'name': 'writing_desk'}, {'id': 12320, 'synset': 'writing_desk.n.01', 'name': 'writing_desk'}, {'id': 12321, 'synset': 'writing_implement.n.01', 'name': 'writing_implement'}, {'id': 12322, 'synset': 'xerographic_printer.n.01', 'name': 'xerographic_printer'}, {'id': 12323, 'synset': 'xerox.n.02', 'name': 'Xerox'}, {'id': 12324, 'synset': 'x-ray_film.n.01', 'name': 'X-ray_film'}, {'id': 12325, 'synset': 'x-ray_machine.n.01', 'name': 'X-ray_machine'}, {'id': 12326, 'synset': 'x-ray_tube.n.01', 'name': 'X-ray_tube'}, {'id': 12327, 'synset': 'yacht_chair.n.01', 'name': 'yacht_chair'}, {'id': 12328, 'synset': 'yagi.n.01', 'name': 'yagi'}, {'id': 12329, 'synset': 'yard.n.09', 'name': 'yard'}, {'id': 12330, 'synset': 'yard.n.08', 'name': 'yard'}, {'id': 12331, 'synset': 'yardarm.n.01', 'name': 'yardarm'}, {'id': 12332, 'synset': 'yard_marker.n.01', 'name': 'yard_marker'}, {'id': 12333, 'synset': 'yardstick.n.02', 'name': 'yardstick'}, {'id': 12334, 'synset': 'yarmulke.n.01', 'name': 'yarmulke'}, {'id': 12335, 'synset': 'yashmak.n.01', 'name': 'yashmak'}, {'id': 12336, 'synset': 'yataghan.n.01', 'name': 'yataghan'}, {'id': 12337, 'synset': 'yawl.n.02', 'name': 'yawl'}, {'id': 12338, 'synset': 'yawl.n.01', 'name': 'yawl'}, {'id': 12339, 'synset': 'yoke.n.01', 'name': 'yoke'}, {'id': 12340, 'synset': 'yoke.n.06', 'name': 'yoke'}, {'id': 12341, 'synset': 'yurt.n.01', 'name': 'yurt'}, {'id': 12342, 'synset': 'zamboni.n.01', 'name': 'Zamboni'}, {'id': 12343, 'synset': 'zero.n.04', 'name': 'zero'}, {'id': 12344, 'synset': 'ziggurat.n.01', 'name': 'ziggurat'}, {'id': 12345, 'synset': 'zill.n.01', 'name': 'zill'}, {'id': 12346, 'synset': 'zip_gun.n.01', 'name': 'zip_gun'}, {'id': 12347, 'synset': 'zither.n.01', 'name': 'zither'}, {'id': 12348, 'synset': 'zoot_suit.n.01', 'name': 'zoot_suit'}, {'id': 12349, 
'synset': 'shading.n.01', 'name': 'shading'}, {'id': 12350, 'synset': 'grain.n.10', 'name': 'grain'}, {'id': 12351, 'synset': 'wood_grain.n.01', 'name': 'wood_grain'}, {'id': 12352, 'synset': 'graining.n.01', 'name': 'graining'}, {'id': 12353, 'synset': 'marbleization.n.01', 'name': 'marbleization'}, {'id': 12354, 'synset': 'light.n.07', 'name': 'light'}, {'id': 12355, 'synset': 'aura.n.02', 'name': 'aura'}, {'id': 12356, 'synset': 'sunniness.n.01', 'name': 'sunniness'}, {'id': 12357, 'synset': 'glint.n.02', 'name': 'glint'}, {'id': 12358, 'synset': 'opalescence.n.01', 'name': 'opalescence'}, {'id': 12359, 'synset': 'polish.n.01', 'name': 'polish'}, {'id': 12360, 'synset': 'primary_color_for_pigments.n.01', 'name': 'primary_color_for_pigments'}, {'id': 12361, 'synset': 'primary_color_for_light.n.01', 'name': 'primary_color_for_light'}, {'id': 12362, 'synset': 'colorlessness.n.01', 'name': 'colorlessness'}, {'id': 12363, 'synset': 'mottle.n.01', 'name': 'mottle'}, {'id': 12364, 'synset': 'achromia.n.01', 'name': 'achromia'}, {'id': 12365, 'synset': 'shade.n.02', 'name': 'shade'}, {'id': 12366, 'synset': 'chromatic_color.n.01', 'name': 'chromatic_color'}, {'id': 12367, 'synset': 'black.n.01', 'name': 'black'}, {'id': 12368, 'synset': 'coal_black.n.01', 'name': 'coal_black'}, {'id': 12369, 'synset': 'alabaster.n.03', 'name': 'alabaster'}, {'id': 12370, 'synset': 'bone.n.03', 'name': 'bone'}, {'id': 12371, 'synset': 'gray.n.01', 'name': 'gray'}, {'id': 12372, 'synset': 'ash_grey.n.01', 'name': 'ash_grey'}, {'id': 12373, 'synset': 'charcoal.n.03', 'name': 'charcoal'}, {'id': 12374, 'synset': 'sanguine.n.01', 'name': 'sanguine'}, {'id': 12375, 'synset': 'turkey_red.n.01', 'name': 'Turkey_red'}, {'id': 12376, 'synset': 'crimson.n.01', 'name': 'crimson'}, {'id': 12377, 'synset': 'dark_red.n.01', 'name': 'dark_red'}, {'id': 12378, 'synset': 'claret.n.01', 'name': 'claret'}, {'id': 12379, 'synset': 'fuschia.n.01', 'name': 'fuschia'}, {'id': 12380, 'synset': 'maroon.n.02', 'name': 'maroon'}, {'id': 12381, 'synset': 'orange.n.02', 'name': 'orange'}, {'id': 12382, 'synset': 'reddish_orange.n.01', 'name': 'reddish_orange'}, {'id': 12383, 'synset': 'yellow.n.01', 'name': 'yellow'}, {'id': 12384, 'synset': 'gamboge.n.02', 'name': 'gamboge'}, {'id': 12385, 'synset': 'pale_yellow.n.01', 'name': 'pale_yellow'}, {'id': 12386, 'synset': 'green.n.01', 'name': 'green'}, {'id': 12387, 'synset': 'greenishness.n.01', 'name': 'greenishness'}, {'id': 12388, 'synset': 'sea_green.n.01', 'name': 'sea_green'}, {'id': 12389, 'synset': 'sage_green.n.01', 'name': 'sage_green'}, {'id': 12390, 'synset': 'bottle_green.n.01', 'name': 'bottle_green'}, {'id': 12391, 'synset': 'emerald.n.03', 'name': 'emerald'}, {'id': 12392, 'synset': 'olive_green.n.01', 'name': 'olive_green'}, {'id': 12393, 'synset': 'jade_green.n.01', 'name': 'jade_green'}, {'id': 12394, 'synset': 'blue.n.01', 'name': 'blue'}, {'id': 12395, 'synset': 'azure.n.01', 'name': 'azure'}, {'id': 12396, 'synset': 'steel_blue.n.01', 'name': 'steel_blue'}, {'id': 12397, 'synset': 'greenish_blue.n.01', 'name': 'greenish_blue'}, {'id': 12398, 'synset': 'purplish_blue.n.01', 'name': 'purplish_blue'}, {'id': 12399, 'synset': 'purple.n.01', 'name': 'purple'}, {'id': 12400, 'synset': 'tyrian_purple.n.02', 'name': 'Tyrian_purple'}, {'id': 12401, 'synset': 'indigo.n.03', 'name': 'indigo'}, {'id': 12402, 'synset': 'lavender.n.02', 'name': 'lavender'}, {'id': 12403, 'synset': 'reddish_purple.n.01', 'name': 'reddish_purple'}, {'id': 12404, 'synset': 'pink.n.01', 'name': 'pink'}, 
{'id': 12405, 'synset': 'carnation.n.02', 'name': 'carnation'}, {'id': 12406, 'synset': 'rose.n.03', 'name': 'rose'}, {'id': 12407, 'synset': 'chestnut.n.04', 'name': 'chestnut'}, {'id': 12408, 'synset': 'chocolate.n.03', 'name': 'chocolate'}, {'id': 12409, 'synset': 'light_brown.n.01', 'name': 'light_brown'}, {'id': 12410, 'synset': 'tan.n.02', 'name': 'tan'}, {'id': 12411, 'synset': 'beige.n.01', 'name': 'beige'}, {'id': 12412, 'synset': 'reddish_brown.n.01', 'name': 'reddish_brown'}, {'id': 12413, 'synset': 'brick_red.n.01', 'name': 'brick_red'}, {'id': 12414, 'synset': 'copper.n.04', 'name': 'copper'}, {'id': 12415, 'synset': 'indian_red.n.03', 'name': 'Indian_red'}, {'id': 12416, 'synset': 'puce.n.01', 'name': 'puce'}, {'id': 12417, 'synset': 'olive.n.05', 'name': 'olive'}, {'id': 12418, 'synset': 'ultramarine.n.02', 'name': 'ultramarine'}, {'id': 12419, 'synset': 'complementary_color.n.01', 'name': 'complementary_color'}, {'id': 12420, 'synset': 'pigmentation.n.02', 'name': 'pigmentation'}, {'id': 12421, 'synset': 'complexion.n.01', 'name': 'complexion'}, {'id': 12422, 'synset': 'ruddiness.n.01', 'name': 'ruddiness'}, {'id': 12423, 'synset': 'nonsolid_color.n.01', 'name': 'nonsolid_color'}, {'id': 12424, 'synset': 'aposematic_coloration.n.01', 'name': 'aposematic_coloration'}, {'id': 12425, 'synset': 'cryptic_coloration.n.01', 'name': 'cryptic_coloration'}, {'id': 12426, 'synset': 'ring.n.01', 'name': 'ring'}, {'id': 12427, 'synset': 'center_of_curvature.n.01', 'name': 'center_of_curvature'}, {'id': 12428, 'synset': 'cadaver.n.01', 'name': 'cadaver'}, {'id': 12429, 'synset': 'mandibular_notch.n.01', 'name': 'mandibular_notch'}, {'id': 12430, 'synset': 'rib.n.05', 'name': 'rib'}, {'id': 12431, 'synset': 'skin.n.01', 'name': 'skin'}, {'id': 12432, 'synset': 'skin_graft.n.01', 'name': 'skin_graft'}, {'id': 12433, 'synset': 'epidermal_cell.n.01', 'name': 'epidermal_cell'}, {'id': 12434, 'synset': 'melanocyte.n.01', 'name': 'melanocyte'}, {'id': 12435, 'synset': 'prickle_cell.n.01', 'name': 'prickle_cell'}, {'id': 12436, 'synset': 'columnar_cell.n.01', 'name': 'columnar_cell'}, {'id': 12437, 'synset': 'spongioblast.n.01', 'name': 'spongioblast'}, {'id': 12438, 'synset': 'squamous_cell.n.01', 'name': 'squamous_cell'}, {'id': 12439, 'synset': 'amyloid_plaque.n.01', 'name': 'amyloid_plaque'}, {'id': 12440, 'synset': 'dental_plaque.n.01', 'name': 'dental_plaque'}, {'id': 12441, 'synset': 'macule.n.01', 'name': 'macule'}, {'id': 12442, 'synset': 'freckle.n.01', 'name': 'freckle'}, {'id': 12443, 'synset': 'bouffant.n.01', 'name': 'bouffant'}, {'id': 12444, 'synset': 'sausage_curl.n.01', 'name': 'sausage_curl'}, {'id': 12445, 'synset': 'forelock.n.01', 'name': 'forelock'}, {'id': 12446, 'synset': 'spit_curl.n.01', 'name': 'spit_curl'}, {'id': 12447, 'synset': 'pigtail.n.01', 'name': 'pigtail'}, {'id': 12448, 'synset': 'pageboy.n.02', 'name': 'pageboy'}, {'id': 12449, 'synset': 'pompadour.n.02', 'name': 'pompadour'}, {'id': 12450, 'synset': 'thatch.n.01', 'name': 'thatch'}, {'id': 12451, 'synset': 'soup-strainer.n.01', 'name': 'soup-strainer'}, {'id': 12452, 'synset': 'mustachio.n.01', 'name': 'mustachio'}, {'id': 12453, 'synset': 'walrus_mustache.n.01', 'name': 'walrus_mustache'}, {'id': 12454, 'synset': 'stubble.n.02', 'name': 'stubble'}, {'id': 12455, 'synset': 'vandyke_beard.n.01', 'name': 'vandyke_beard'}, {'id': 12456, 'synset': 'soul_patch.n.01', 'name': 'soul_patch'}, {'id': 12457, 'synset': 'esophageal_smear.n.01', 'name': 'esophageal_smear'}, {'id': 12458, 'synset': 
'paraduodenal_smear.n.01', 'name': 'paraduodenal_smear'}, {'id': 12459, 'synset': 'specimen.n.02', 'name': 'specimen'}, {'id': 12460, 'synset': 'punctum.n.01', 'name': 'punctum'}, {'id': 12461, 'synset': 'glenoid_fossa.n.02', 'name': 'glenoid_fossa'}, {'id': 12462, 'synset': 'diastema.n.01', 'name': 'diastema'}, {'id': 12463, 'synset': 'marrow.n.01', 'name': 'marrow'}, {'id': 12464, 'synset': 'mouth.n.01', 'name': 'mouth'}, {'id': 12465, 'synset': 'canthus.n.01', 'name': 'canthus'}, {'id': 12466, 'synset': 'milk.n.02', 'name': 'milk'}, {'id': 12467, 'synset': "mother's_milk.n.01", 'name': "mother's_milk"}, {'id': 12468, 'synset': 'colostrum.n.01', 'name': 'colostrum'}, {'id': 12469, 'synset': 'vein.n.01', 'name': 'vein'}, {'id': 12470, 'synset': 'ganglion_cell.n.01', 'name': 'ganglion_cell'}, {'id': 12471, 'synset': 'x_chromosome.n.01', 'name': 'X_chromosome'}, {'id': 12472, 'synset': 'embryonic_cell.n.01', 'name': 'embryonic_cell'}, {'id': 12473, 'synset': 'myeloblast.n.01', 'name': 'myeloblast'}, {'id': 12474, 'synset': 'sideroblast.n.01', 'name': 'sideroblast'}, {'id': 12475, 'synset': 'osteocyte.n.01', 'name': 'osteocyte'}, {'id': 12476, 'synset': 'megalocyte.n.01', 'name': 'megalocyte'}, {'id': 12477, 'synset': 'leukocyte.n.01', 'name': 'leukocyte'}, {'id': 12478, 'synset': 'histiocyte.n.01', 'name': 'histiocyte'}, {'id': 12479, 'synset': 'fixed_phagocyte.n.01', 'name': 'fixed_phagocyte'}, {'id': 12480, 'synset': 'lymphocyte.n.01', 'name': 'lymphocyte'}, {'id': 12481, 'synset': 'monoblast.n.01', 'name': 'monoblast'}, {'id': 12482, 'synset': 'neutrophil.n.01', 'name': 'neutrophil'}, {'id': 12483, 'synset': 'microphage.n.01', 'name': 'microphage'}, {'id': 12484, 'synset': 'sickle_cell.n.01', 'name': 'sickle_cell'}, {'id': 12485, 'synset': 'siderocyte.n.01', 'name': 'siderocyte'}, {'id': 12486, 'synset': 'spherocyte.n.01', 'name': 'spherocyte'}, {'id': 12487, 'synset': 'ootid.n.01', 'name': 'ootid'}, {'id': 12488, 'synset': 'oocyte.n.01', 'name': 'oocyte'}, {'id': 12489, 'synset': 'spermatid.n.01', 'name': 'spermatid'}, {'id': 12490, 'synset': 'leydig_cell.n.01', 'name': 'Leydig_cell'}, {'id': 12491, 'synset': 'striated_muscle_cell.n.01', 'name': 'striated_muscle_cell'}, {'id': 12492, 'synset': 'smooth_muscle_cell.n.01', 'name': 'smooth_muscle_cell'}, {'id': 12493, 'synset': "ranvier's_nodes.n.01", 'name': "Ranvier's_nodes"}, {'id': 12494, 'synset': 'neuroglia.n.01', 'name': 'neuroglia'}, {'id': 12495, 'synset': 'astrocyte.n.01', 'name': 'astrocyte'}, {'id': 12496, 'synset': 'protoplasmic_astrocyte.n.01', 'name': 'protoplasmic_astrocyte'}, {'id': 12497, 'synset': 'oligodendrocyte.n.01', 'name': 'oligodendrocyte'}, {'id': 12498, 'synset': 'proprioceptor.n.01', 'name': 'proprioceptor'}, {'id': 12499, 'synset': 'dendrite.n.01', 'name': 'dendrite'}, {'id': 12500, 'synset': 'sensory_fiber.n.01', 'name': 'sensory_fiber'}, {'id': 12501, 'synset': 'subarachnoid_space.n.01', 'name': 'subarachnoid_space'}, {'id': 12502, 'synset': 'cerebral_cortex.n.01', 'name': 'cerebral_cortex'}, {'id': 12503, 'synset': 'renal_cortex.n.01', 'name': 'renal_cortex'}, {'id': 12504, 'synset': 'prepuce.n.02', 'name': 'prepuce'}, {'id': 12505, 'synset': 'head.n.01', 'name': 'head'}, {'id': 12506, 'synset': 'scalp.n.01', 'name': 'scalp'}, {'id': 12507, 'synset': 'frontal_eminence.n.01', 'name': 'frontal_eminence'}, {'id': 12508, 'synset': 'suture.n.01', 'name': 'suture'}, {'id': 12509, 'synset': 'foramen_magnum.n.01', 'name': 'foramen_magnum'}, {'id': 12510, 'synset': 'esophagogastric_junction.n.01', 'name': 
'esophagogastric_junction'}, {'id': 12511, 'synset': 'heel.n.02', 'name': 'heel'}, {'id': 12512, 'synset': 'cuticle.n.01', 'name': 'cuticle'}, {'id': 12513, 'synset': 'hangnail.n.01', 'name': 'hangnail'}, {'id': 12514, 'synset': 'exoskeleton.n.01', 'name': 'exoskeleton'}, {'id': 12515, 'synset': 'abdominal_wall.n.01', 'name': 'abdominal_wall'}, {'id': 12516, 'synset': 'lemon.n.04', 'name': 'lemon'}, {'id': 12517, 'synset': 'coordinate_axis.n.01', 'name': 'coordinate_axis'}, {'id': 12518, 'synset': 'landscape.n.04', 'name': 'landscape'}, {'id': 12519, 'synset': 'medium.n.01', 'name': 'medium'}, {'id': 12520, 'synset': 'vehicle.n.02', 'name': 'vehicle'}, {'id': 12521, 'synset': 'paper.n.04', 'name': 'paper'}, {'id': 12522, 'synset': 'channel.n.01', 'name': 'channel'}, {'id': 12523, 'synset': 'film.n.02', 'name': 'film'}, {'id': 12524, 'synset': 'silver_screen.n.01', 'name': 'silver_screen'}, {'id': 12525, 'synset': 'free_press.n.01', 'name': 'free_press'}, {'id': 12526, 'synset': 'press.n.02', 'name': 'press'}, {'id': 12527, 'synset': 'print_media.n.01', 'name': 'print_media'}, {'id': 12528, 'synset': 'storage_medium.n.01', 'name': 'storage_medium'}, {'id': 12529, 'synset': 'magnetic_storage_medium.n.01', 'name': 'magnetic_storage_medium'}, {'id': 12530, 'synset': 'journalism.n.01', 'name': 'journalism'}, {'id': 12531, 'synset': 'fleet_street.n.02', 'name': 'Fleet_Street'}, {'id': 12532, 'synset': 'photojournalism.n.01', 'name': 'photojournalism'}, {'id': 12533, 'synset': 'news_photography.n.01', 'name': 'news_photography'}, {'id': 12534, 'synset': 'rotogravure.n.02', 'name': 'rotogravure'}, {'id': 12535, 'synset': 'daily.n.01', 'name': 'daily'}, {'id': 12536, 'synset': 'gazette.n.01', 'name': 'gazette'}, {'id': 12537, 'synset': 'school_newspaper.n.01', 'name': 'school_newspaper'}, {'id': 12538, 'synset': 'tabloid.n.02', 'name': 'tabloid'}, {'id': 12539, 'synset': 'yellow_journalism.n.01', 'name': 'yellow_journalism'}, {'id': 12540, 'synset': 'telecommunication.n.01', 'name': 'telecommunication'}, {'id': 12541, 'synset': 'telephone.n.02', 'name': 'telephone'}, {'id': 12542, 'synset': 'voice_mail.n.01', 'name': 'voice_mail'}, {'id': 12543, 'synset': 'call.n.01', 'name': 'call'}, {'id': 12544, 'synset': 'call-back.n.01', 'name': 'call-back'}, {'id': 12545, 'synset': 'collect_call.n.01', 'name': 'collect_call'}, {'id': 12546, 'synset': 'call_forwarding.n.01', 'name': 'call_forwarding'}, {'id': 12547, 'synset': 'call-in.n.01', 'name': 'call-in'}, {'id': 12548, 'synset': 'call_waiting.n.01', 'name': 'call_waiting'}, {'id': 12549, 'synset': 'crank_call.n.01', 'name': 'crank_call'}, {'id': 12550, 'synset': 'local_call.n.01', 'name': 'local_call'}, {'id': 12551, 'synset': 'long_distance.n.01', 'name': 'long_distance'}, {'id': 12552, 'synset': 'toll_call.n.01', 'name': 'toll_call'}, {'id': 12553, 'synset': 'wake-up_call.n.02', 'name': 'wake-up_call'}, {'id': 12554, 'synset': 'three-way_calling.n.01', 'name': 'three-way_calling'}, {'id': 12555, 'synset': 'telegraphy.n.01', 'name': 'telegraphy'}, {'id': 12556, 'synset': 'cable.n.01', 'name': 'cable'}, {'id': 12557, 'synset': 'wireless.n.02', 'name': 'wireless'}, {'id': 12558, 'synset': 'radiotelegraph.n.01', 'name': 'radiotelegraph'}, {'id': 12559, 'synset': 'radiotelephone.n.01', 'name': 'radiotelephone'}, {'id': 12560, 'synset': 'broadcasting.n.02', 'name': 'broadcasting'}, {'id': 12561, 'synset': 'rediffusion.n.01', 'name': 'Rediffusion'}, {'id': 12562, 'synset': 'multiplex.n.01', 'name': 'multiplex'}, {'id': 12563, 'synset': 'radio.n.01', 'name': 
'radio'}, {'id': 12564, 'synset': 'television.n.01', 'name': 'television'}, {'id': 12565, 'synset': 'cable_television.n.01', 'name': 'cable_television'}, {'id': 12566, 'synset': 'high-definition_television.n.01', 'name': 'high-definition_television'}, {'id': 12567, 'synset': 'reception.n.03', 'name': 'reception'}, {'id': 12568, 'synset': 'signal_detection.n.01', 'name': 'signal_detection'}, {'id': 12569, 'synset': 'hakham.n.01', 'name': 'Hakham'}, {'id': 12570, 'synset': 'web_site.n.01', 'name': 'web_site'}, {'id': 12571, 'synset': 'chat_room.n.01', 'name': 'chat_room'}, {'id': 12572, 'synset': 'portal_site.n.01', 'name': 'portal_site'}, {'id': 12573, 'synset': 'jotter.n.01', 'name': 'jotter'}, {'id': 12574, 'synset': 'breviary.n.01', 'name': 'breviary'}, {'id': 12575, 'synset': 'wordbook.n.01', 'name': 'wordbook'}, {'id': 12576, 'synset': 'desk_dictionary.n.01', 'name': 'desk_dictionary'}, {'id': 12577, 'synset': 'reckoner.n.02', 'name': 'reckoner'}, {'id': 12578, 'synset': 'document.n.01', 'name': 'document'}, {'id': 12579, 'synset': 'album.n.01', 'name': 'album'}, {'id': 12580, 'synset': 'concept_album.n.01', 'name': 'concept_album'}, {'id': 12581, 'synset': 'rock_opera.n.01', 'name': 'rock_opera'}, {'id': 12582, 'synset': 'tribute_album.n.01', 'name': 'tribute_album'}, {'id': 12583, 'synset': 'magazine.n.01', 'name': 'magazine'}, {'id': 12584, 'synset': 'colour_supplement.n.01', 'name': 'colour_supplement'}, {'id': 12585, 'synset': 'news_magazine.n.01', 'name': 'news_magazine'}, {'id': 12586, 'synset': 'pulp.n.04', 'name': 'pulp'}, {'id': 12587, 'synset': 'slick.n.02', 'name': 'slick'}, {'id': 12588, 'synset': 'trade_magazine.n.01', 'name': 'trade_magazine'}, {'id': 12589, 'synset': 'movie.n.01', 'name': 'movie'}, {'id': 12590, 'synset': 'outtake.n.01', 'name': 'outtake'}, {'id': 12591, 'synset': "shoot-'em-up.n.01", 'name': "shoot-'em-up"}, {'id': 12592, 'synset': 'spaghetti_western.n.01', 'name': 'spaghetti_Western'}, {'id': 12593, 'synset': 'encyclical.n.01', 'name': 'encyclical'}, {'id': 12594, 'synset': 'crossword_puzzle.n.01', 'name': 'crossword_puzzle'}, {'id': 12595, 'synset': 'sign.n.02', 'name': 'sign'}, {'id': 12596, 'synset': 'swastika.n.01', 'name': 'swastika'}, {'id': 12597, 'synset': 'concert.n.01', 'name': 'concert'}, {'id': 12598, 'synset': 'artwork.n.01', 'name': 'artwork'}, {'id': 12599, 'synset': 'lobe.n.03', 'name': 'lobe'}, {'id': 12600, 'synset': 'book_jacket.n.01', 'name': 'book_jacket'}, {'id': 12601, 'synset': 'cairn.n.01', 'name': 'cairn'}, {'id': 12602, 'synset': 'three-day_event.n.01', 'name': 'three-day_event'}, {'id': 12603, 'synset': 'comfort_food.n.01', 'name': 'comfort_food'}, {'id': 12604, 'synset': 'comestible.n.01', 'name': 'comestible'}, {'id': 12605, 'synset': 'tuck.n.01', 'name': 'tuck'}, {'id': 12606, 'synset': 'course.n.07', 'name': 'course'}, {'id': 12607, 'synset': 'dainty.n.01', 'name': 'dainty'}, {'id': 12608, 'synset': 'dish.n.02', 'name': 'dish'}, {'id': 12609, 'synset': 'fast_food.n.01', 'name': 'fast_food'}, {'id': 12610, 'synset': 'finger_food.n.01', 'name': 'finger_food'}, {'id': 12611, 'synset': 'ingesta.n.01', 'name': 'ingesta'}, {'id': 12612, 'synset': 'kosher.n.01', 'name': 'kosher'}, {'id': 12613, 'synset': 'fare.n.04', 'name': 'fare'}, {'id': 12614, 'synset': 'diet.n.03', 'name': 'diet'}, {'id': 12615, 'synset': 'diet.n.01', 'name': 'diet'}, {'id': 12616, 'synset': 'dietary.n.01', 'name': 'dietary'}, {'id': 12617, 'synset': 'balanced_diet.n.01', 'name': 'balanced_diet'}, {'id': 12618, 'synset': 'bland_diet.n.01', 'name': 
'bland_diet'}, {'id': 12619, 'synset': 'clear_liquid_diet.n.01', 'name': 'clear_liquid_diet'}, {'id': 12620, 'synset': 'diabetic_diet.n.01', 'name': 'diabetic_diet'}, {'id': 12621, 'synset': 'dietary_supplement.n.01', 'name': 'dietary_supplement'}, {'id': 12622, 'synset': 'carbohydrate_loading.n.01', 'name': 'carbohydrate_loading'}, {'id': 12623, 'synset': 'fad_diet.n.01', 'name': 'fad_diet'}, {'id': 12624, 'synset': 'gluten-free_diet.n.01', 'name': 'gluten-free_diet'}, {'id': 12625, 'synset': 'high-protein_diet.n.01', 'name': 'high-protein_diet'}, {'id': 12626, 'synset': 'high-vitamin_diet.n.01', 'name': 'high-vitamin_diet'}, {'id': 12627, 'synset': 'light_diet.n.01', 'name': 'light_diet'}, {'id': 12628, 'synset': 'liquid_diet.n.01', 'name': 'liquid_diet'}, {'id': 12629, 'synset': 'low-calorie_diet.n.01', 'name': 'low-calorie_diet'}, {'id': 12630, 'synset': 'low-fat_diet.n.01', 'name': 'low-fat_diet'}, {'id': 12631, 'synset': 'low-sodium_diet.n.01', 'name': 'low-sodium_diet'}, {'id': 12632, 'synset': 'macrobiotic_diet.n.01', 'name': 'macrobiotic_diet'}, {'id': 12633, 'synset': 'reducing_diet.n.01', 'name': 'reducing_diet'}, {'id': 12634, 'synset': 'soft_diet.n.01', 'name': 'soft_diet'}, {'id': 12635, 'synset': 'vegetarianism.n.01', 'name': 'vegetarianism'}, {'id': 12636, 'synset': 'menu.n.02', 'name': 'menu'}, {'id': 12637, 'synset': 'chow.n.02', 'name': 'chow'}, {'id': 12638, 'synset': 'board.n.04', 'name': 'board'}, {'id': 12639, 'synset': 'mess.n.04', 'name': 'mess'}, {'id': 12640, 'synset': 'ration.n.01', 'name': 'ration'}, {'id': 12641, 'synset': 'field_ration.n.01', 'name': 'field_ration'}, {'id': 12642, 'synset': 'k_ration.n.01', 'name': 'K_ration'}, {'id': 12643, 'synset': 'c-ration.n.01', 'name': 'C-ration'}, {'id': 12644, 'synset': 'foodstuff.n.02', 'name': 'foodstuff'}, {'id': 12645, 'synset': 'starches.n.01', 'name': 'starches'}, {'id': 12646, 'synset': 'breadstuff.n.02', 'name': 'breadstuff'}, {'id': 12647, 'synset': 'coloring.n.01', 'name': 'coloring'}, {'id': 12648, 'synset': 'concentrate.n.02', 'name': 'concentrate'}, {'id': 12649, 'synset': 'tomato_concentrate.n.01', 'name': 'tomato_concentrate'}, {'id': 12650, 'synset': 'meal.n.03', 'name': 'meal'}, {'id': 12651, 'synset': 'kibble.n.01', 'name': 'kibble'}, {'id': 12652, 'synset': 'farina.n.01', 'name': 'farina'}, {'id': 12653, 'synset': 'matzo_meal.n.01', 'name': 'matzo_meal'}, {'id': 12654, 'synset': 'oatmeal.n.02', 'name': 'oatmeal'}, {'id': 12655, 'synset': 'pea_flour.n.01', 'name': 'pea_flour'}, {'id': 12656, 'synset': 'roughage.n.01', 'name': 'roughage'}, {'id': 12657, 'synset': 'bran.n.02', 'name': 'bran'}, {'id': 12658, 'synset': 'flour.n.01', 'name': 'flour'}, {'id': 12659, 'synset': 'plain_flour.n.01', 'name': 'plain_flour'}, {'id': 12660, 'synset': 'wheat_flour.n.01', 'name': 'wheat_flour'}, {'id': 12661, 'synset': 'whole_wheat_flour.n.01', 'name': 'whole_wheat_flour'}, {'id': 12662, 'synset': 'soybean_meal.n.01', 'name': 'soybean_meal'}, {'id': 12663, 'synset': 'semolina.n.01', 'name': 'semolina'}, {'id': 12664, 'synset': 'corn_gluten_feed.n.01', 'name': 'corn_gluten_feed'}, {'id': 12665, 'synset': 'nutriment.n.01', 'name': 'nutriment'}, {'id': 12666, 'synset': 'commissariat.n.01', 'name': 'commissariat'}, {'id': 12667, 'synset': 'larder.n.01', 'name': 'larder'}, {'id': 12668, 'synset': 'frozen_food.n.01', 'name': 'frozen_food'}, {'id': 12669, 'synset': 'canned_food.n.01', 'name': 'canned_food'}, {'id': 12670, 'synset': 'canned_meat.n.01', 'name': 'canned_meat'}, {'id': 12671, 'synset': 'spam.n.01', 'name': 
'Spam'}, {'id': 12672, 'synset': 'dehydrated_food.n.01', 'name': 'dehydrated_food'}, {'id': 12673, 'synset': 'square_meal.n.01', 'name': 'square_meal'}, {'id': 12674, 'synset': 'meal.n.01', 'name': 'meal'}, {'id': 12675, 'synset': 'potluck.n.01', 'name': 'potluck'}, {'id': 12676, 'synset': 'refection.n.01', 'name': 'refection'}, {'id': 12677, 'synset': 'refreshment.n.01', 'name': 'refreshment'}, {'id': 12678, 'synset': 'breakfast.n.01', 'name': 'breakfast'}, {'id': 12679, 'synset': 'continental_breakfast.n.01', 'name': 'continental_breakfast'}, {'id': 12680, 'synset': 'brunch.n.01', 'name': 'brunch'}, {'id': 12681, 'synset': 'lunch.n.01', 'name': 'lunch'}, {'id': 12682, 'synset': 'business_lunch.n.01', 'name': 'business_lunch'}, {'id': 12683, 'synset': 'high_tea.n.01', 'name': 'high_tea'}, {'id': 12684, 'synset': 'tea.n.02', 'name': 'tea'}, {'id': 12685, 'synset': 'dinner.n.01', 'name': 'dinner'}, {'id': 12686, 'synset': 'supper.n.01', 'name': 'supper'}, {'id': 12687, 'synset': 'buffet.n.02', 'name': 'buffet'}, {'id': 12688, 'synset': 'picnic.n.03', 'name': 'picnic'}, {'id': 12689, 'synset': 'cookout.n.01', 'name': 'cookout'}, {'id': 12690, 'synset': 'barbecue.n.02', 'name': 'barbecue'}, {'id': 12691, 'synset': 'clambake.n.01', 'name': 'clambake'}, {'id': 12692, 'synset': 'fish_fry.n.01', 'name': 'fish_fry'}, {'id': 12693, 'synset': 'bite.n.04', 'name': 'bite'}, {'id': 12694, 'synset': 'nosh.n.01', 'name': 'nosh'}, {'id': 12695, 'synset': 'nosh-up.n.01', 'name': 'nosh-up'}, {'id': 12696, 'synset': "ploughman's_lunch.n.01", 'name': "ploughman's_lunch"}, {'id': 12697, 'synset': 'coffee_break.n.01', 'name': 'coffee_break'}, {'id': 12698, 'synset': 'banquet.n.02', 'name': 'banquet'}, {'id': 12699, 'synset': 'entree.n.01', 'name': 'entree'}, {'id': 12700, 'synset': 'piece_de_resistance.n.02', 'name': 'piece_de_resistance'}, {'id': 12701, 'synset': 'plate.n.08', 'name': 'plate'}, {'id': 12702, 'synset': 'adobo.n.01', 'name': 'adobo'}, {'id': 12703, 'synset': 'side_dish.n.01', 'name': 'side_dish'}, {'id': 12704, 'synset': 'special.n.02', 'name': 'special'}, {'id': 12705, 'synset': 'chicken_casserole.n.01', 'name': 'chicken_casserole'}, {'id': 12706, 'synset': 'chicken_cacciatore.n.01', 'name': 'chicken_cacciatore'}, {'id': 12707, 'synset': 'antipasto.n.01', 'name': 'antipasto'}, {'id': 12708, 'synset': 'appetizer.n.01', 'name': 'appetizer'}, {'id': 12709, 'synset': 'canape.n.01', 'name': 'canape'}, {'id': 12710, 'synset': 'cocktail.n.02', 'name': 'cocktail'}, {'id': 12711, 'synset': 'fruit_cocktail.n.01', 'name': 'fruit_cocktail'}, {'id': 12712, 'synset': 'crab_cocktail.n.01', 'name': 'crab_cocktail'}, {'id': 12713, 'synset': 'shrimp_cocktail.n.01', 'name': 'shrimp_cocktail'}, {'id': 12714, 'synset': "hors_d'oeuvre.n.01", 'name': "hors_d'oeuvre"}, {'id': 12715, 'synset': 'relish.n.02', 'name': 'relish'}, {'id': 12716, 'synset': 'dip.n.04', 'name': 'dip'}, {'id': 12717, 'synset': 'bean_dip.n.01', 'name': 'bean_dip'}, {'id': 12718, 'synset': 'cheese_dip.n.01', 'name': 'cheese_dip'}, {'id': 12719, 'synset': 'clam_dip.n.01', 'name': 'clam_dip'}, {'id': 12720, 'synset': 'guacamole.n.01', 'name': 'guacamole'}, {'id': 12721, 'synset': 'soup_du_jour.n.01', 'name': 'soup_du_jour'}, {'id': 12722, 'synset': 'alphabet_soup.n.02', 'name': 'alphabet_soup'}, {'id': 12723, 'synset': 'consomme.n.01', 'name': 'consomme'}, {'id': 12724, 'synset': 'madrilene.n.01', 'name': 'madrilene'}, {'id': 12725, 'synset': 'bisque.n.01', 'name': 'bisque'}, {'id': 12726, 'synset': 'borsch.n.01', 'name': 'borsch'}, {'id': 12727, 
'synset': 'broth.n.02', 'name': 'broth'}, {'id': 12728, 'synset': 'barley_water.n.01', 'name': 'barley_water'}, {'id': 12729, 'synset': 'bouillon.n.01', 'name': 'bouillon'}, {'id': 12730, 'synset': 'beef_broth.n.01', 'name': 'beef_broth'}, {'id': 12731, 'synset': 'chicken_broth.n.01', 'name': 'chicken_broth'}, {'id': 12732, 'synset': 'broth.n.01', 'name': 'broth'}, {'id': 12733, 'synset': 'stock_cube.n.01', 'name': 'stock_cube'}, {'id': 12734, 'synset': 'chicken_soup.n.01', 'name': 'chicken_soup'}, {'id': 12735, 'synset': 'cock-a-leekie.n.01', 'name': 'cock-a-leekie'}, {'id': 12736, 'synset': 'gazpacho.n.01', 'name': 'gazpacho'}, {'id': 12737, 'synset': 'gumbo.n.04', 'name': 'gumbo'}, {'id': 12738, 'synset': 'julienne.n.02', 'name': 'julienne'}, {'id': 12739, 'synset': 'marmite.n.01', 'name': 'marmite'}, {'id': 12740, 'synset': 'mock_turtle_soup.n.01', 'name': 'mock_turtle_soup'}, {'id': 12741, 'synset': 'mulligatawny.n.01', 'name': 'mulligatawny'}, {'id': 12742, 'synset': 'oxtail_soup.n.01', 'name': 'oxtail_soup'}, {'id': 12743, 'synset': 'pea_soup.n.01', 'name': 'pea_soup'}, {'id': 12744, 'synset': 'pepper_pot.n.01', 'name': 'pepper_pot'}, {'id': 12745, 'synset': 'petite_marmite.n.01', 'name': 'petite_marmite'}, {'id': 12746, 'synset': 'potage.n.01', 'name': 'potage'}, {'id': 12747, 'synset': 'pottage.n.01', 'name': 'pottage'}, {'id': 12748, 'synset': 'turtle_soup.n.01', 'name': 'turtle_soup'}, {'id': 12749, 'synset': 'eggdrop_soup.n.01', 'name': 'eggdrop_soup'}, {'id': 12750, 'synset': 'chowder.n.01', 'name': 'chowder'}, {'id': 12751, 'synset': 'corn_chowder.n.01', 'name': 'corn_chowder'}, {'id': 12752, 'synset': 'clam_chowder.n.01', 'name': 'clam_chowder'}, {'id': 12753, 'synset': 'manhattan_clam_chowder.n.01', 'name': 'Manhattan_clam_chowder'}, {'id': 12754, 'synset': 'new_england_clam_chowder.n.01', 'name': 'New_England_clam_chowder'}, {'id': 12755, 'synset': 'fish_chowder.n.01', 'name': 'fish_chowder'}, {'id': 12756, 'synset': 'won_ton.n.02', 'name': 'won_ton'}, {'id': 12757, 'synset': 'split-pea_soup.n.01', 'name': 'split-pea_soup'}, {'id': 12758, 'synset': 'green_pea_soup.n.01', 'name': 'green_pea_soup'}, {'id': 12759, 'synset': 'lentil_soup.n.01', 'name': 'lentil_soup'}, {'id': 12760, 'synset': 'scotch_broth.n.01', 'name': 'Scotch_broth'}, {'id': 12761, 'synset': 'vichyssoise.n.01', 'name': 'vichyssoise'}, {'id': 12762, 'synset': 'bigos.n.01', 'name': 'bigos'}, {'id': 12763, 'synset': 'brunswick_stew.n.01', 'name': 'Brunswick_stew'}, {'id': 12764, 'synset': 'burgoo.n.03', 'name': 'burgoo'}, {'id': 12765, 'synset': 'burgoo.n.02', 'name': 'burgoo'}, {'id': 12766, 'synset': 'olla_podrida.n.01', 'name': 'olla_podrida'}, {'id': 12767, 'synset': 'mulligan_stew.n.01', 'name': 'mulligan_stew'}, {'id': 12768, 'synset': 'purloo.n.01', 'name': 'purloo'}, {'id': 12769, 'synset': 'goulash.n.01', 'name': 'goulash'}, {'id': 12770, 'synset': 'hotchpotch.n.02', 'name': 'hotchpotch'}, {'id': 12771, 'synset': 'hot_pot.n.01', 'name': 'hot_pot'}, {'id': 12772, 'synset': 'beef_goulash.n.01', 'name': 'beef_goulash'}, {'id': 12773, 'synset': 'pork-and-veal_goulash.n.01', 'name': 'pork-and-veal_goulash'}, {'id': 12774, 'synset': 'porkholt.n.01', 'name': 'porkholt'}, {'id': 12775, 'synset': 'irish_stew.n.01', 'name': 'Irish_stew'}, {'id': 12776, 'synset': 'oyster_stew.n.01', 'name': 'oyster_stew'}, {'id': 12777, 'synset': 'lobster_stew.n.01', 'name': 'lobster_stew'}, {'id': 12778, 'synset': 'lobscouse.n.01', 'name': 'lobscouse'}, {'id': 12779, 'synset': 'fish_stew.n.01', 'name': 'fish_stew'}, {'id': 
12780, 'synset': 'bouillabaisse.n.01', 'name': 'bouillabaisse'}, {'id': 12781, 'synset': 'matelote.n.01', 'name': 'matelote'}, {'id': 12782, 'synset': 'paella.n.01', 'name': 'paella'}, {'id': 12783, 'synset': 'fricassee.n.01', 'name': 'fricassee'}, {'id': 12784, 'synset': 'chicken_stew.n.01', 'name': 'chicken_stew'}, {'id': 12785, 'synset': 'turkey_stew.n.01', 'name': 'turkey_stew'}, {'id': 12786, 'synset': 'beef_stew.n.01', 'name': 'beef_stew'}, {'id': 12787, 'synset': 'ragout.n.01', 'name': 'ragout'}, {'id': 12788, 'synset': 'ratatouille.n.01', 'name': 'ratatouille'}, {'id': 12789, 'synset': 'salmi.n.01', 'name': 'salmi'}, {'id': 12790, 'synset': 'pot-au-feu.n.01', 'name': 'pot-au-feu'}, {'id': 12791, 'synset': 'slumgullion.n.01', 'name': 'slumgullion'}, {'id': 12792, 'synset': 'smorgasbord.n.02', 'name': 'smorgasbord'}, {'id': 12793, 'synset': 'viand.n.01', 'name': 'viand'}, {'id': 12794, 'synset': 'ready-mix.n.01', 'name': 'ready-mix'}, {'id': 12795, 'synset': 'brownie_mix.n.01', 'name': 'brownie_mix'}, {'id': 12796, 'synset': 'cake_mix.n.01', 'name': 'cake_mix'}, {'id': 12797, 'synset': 'lemonade_mix.n.01', 'name': 'lemonade_mix'}, {'id': 12798, 'synset': 'self-rising_flour.n.01', 'name': 'self-rising_flour'}, {'id': 12799, 'synset': 'choice_morsel.n.01', 'name': 'choice_morsel'}, {'id': 12800, 'synset': 'savory.n.04', 'name': 'savory'}, {'id': 12801, 'synset': "calf's-foot_jelly.n.01", 'name': "calf's-foot_jelly"}, {'id': 12802, 'synset': 'caramel.n.02', 'name': 'caramel'}, {'id': 12803, 'synset': 'lump_sugar.n.01', 'name': 'lump_sugar'}, {'id': 12804, 'synset': 'cane_sugar.n.02', 'name': 'cane_sugar'}, {'id': 12805, 'synset': 'castor_sugar.n.01', 'name': 'castor_sugar'}, {'id': 12806, 'synset': 'powdered_sugar.n.01', 'name': 'powdered_sugar'}, {'id': 12807, 'synset': 'granulated_sugar.n.01', 'name': 'granulated_sugar'}, {'id': 12808, 'synset': 'icing_sugar.n.01', 'name': 'icing_sugar'}, {'id': 12809, 'synset': 'corn_sugar.n.02', 'name': 'corn_sugar'}, {'id': 12810, 'synset': 'brown_sugar.n.01', 'name': 'brown_sugar'}, {'id': 12811, 'synset': 'demerara.n.05', 'name': 'demerara'}, {'id': 12812, 'synset': 'sweet.n.03', 'name': 'sweet'}, {'id': 12813, 'synset': 'confectionery.n.01', 'name': 'confectionery'}, {'id': 12814, 'synset': 'confiture.n.01', 'name': 'confiture'}, {'id': 12815, 'synset': 'sweetmeat.n.01', 'name': 'sweetmeat'}, {'id': 12816, 'synset': 'candy.n.01', 'name': 'candy'}, {'id': 12817, 'synset': 'carob_bar.n.01', 'name': 'carob_bar'}, {'id': 12818, 'synset': 'hardbake.n.01', 'name': 'hardbake'}, {'id': 12819, 'synset': 'hard_candy.n.01', 'name': 'hard_candy'}, {'id': 12820, 'synset': 'barley-sugar.n.01', 'name': 'barley-sugar'}, {'id': 12821, 'synset': 'brandyball.n.01', 'name': 'brandyball'}, {'id': 12822, 'synset': 'jawbreaker.n.01', 'name': 'jawbreaker'}, {'id': 12823, 'synset': 'lemon_drop.n.01', 'name': 'lemon_drop'}, {'id': 12824, 'synset': 'sourball.n.01', 'name': 'sourball'}, {'id': 12825, 'synset': 'patty.n.03', 'name': 'patty'}, {'id': 12826, 'synset': 'peppermint_patty.n.01', 'name': 'peppermint_patty'}, {'id': 12827, 'synset': 'bonbon.n.01', 'name': 'bonbon'}, {'id': 12828, 'synset': 'brittle.n.01', 'name': 'brittle'}, {'id': 12829, 'synset': 'peanut_brittle.n.01', 'name': 'peanut_brittle'}, {'id': 12830, 'synset': 'chewing_gum.n.01', 'name': 'chewing_gum'}, {'id': 12831, 'synset': 'gum_ball.n.01', 'name': 'gum_ball'}, {'id': 12832, 'synset': 'butterscotch.n.01', 'name': 'butterscotch'}, {'id': 12833, 'synset': 'candied_fruit.n.01', 'name': 'candied_fruit'}, 
{'id': 12834, 'synset': 'candied_apple.n.01', 'name': 'candied_apple'}, {'id': 12835, 'synset': 'crystallized_ginger.n.01', 'name': 'crystallized_ginger'}, {'id': 12836, 'synset': 'grapefruit_peel.n.01', 'name': 'grapefruit_peel'}, {'id': 12837, 'synset': 'lemon_peel.n.02', 'name': 'lemon_peel'}, {'id': 12838, 'synset': 'orange_peel.n.02', 'name': 'orange_peel'}, {'id': 12839, 'synset': 'candied_citrus_peel.n.01', 'name': 'candied_citrus_peel'}, {'id': 12840, 'synset': 'candy_corn.n.01', 'name': 'candy_corn'}, {'id': 12841, 'synset': 'caramel.n.01', 'name': 'caramel'}, {'id': 12842, 'synset': 'center.n.14', 'name': 'center'}, {'id': 12843, 'synset': 'comfit.n.01', 'name': 'comfit'}, {'id': 12844, 'synset': 'cotton_candy.n.01', 'name': 'cotton_candy'}, {'id': 12845, 'synset': 'dragee.n.02', 'name': 'dragee'}, {'id': 12846, 'synset': 'dragee.n.01', 'name': 'dragee'}, {'id': 12847, 'synset': 'fondant.n.01', 'name': 'fondant'}, {'id': 12848, 'synset': 'chocolate_fudge.n.01', 'name': 'chocolate_fudge'}, {'id': 12849, 'synset': 'divinity.n.03', 'name': 'divinity'}, {'id': 12850, 'synset': 'penuche.n.01', 'name': 'penuche'}, {'id': 12851, 'synset': 'gumdrop.n.01', 'name': 'gumdrop'}, {'id': 12852, 'synset': 'jujube.n.03', 'name': 'jujube'}, {'id': 12853, 'synset': 'honey_crisp.n.01', 'name': 'honey_crisp'}, {'id': 12854, 'synset': 'horehound.n.02', 'name': 'horehound'}, {'id': 12855, 'synset': 'peppermint.n.03', 'name': 'peppermint'}, {'id': 12856, 'synset': 'kiss.n.03', 'name': 'kiss'}, {'id': 12857, 'synset': 'molasses_kiss.n.01', 'name': 'molasses_kiss'}, {'id': 12858, 'synset': 'meringue_kiss.n.01', 'name': 'meringue_kiss'}, {'id': 12859, 'synset': 'chocolate_kiss.n.01', 'name': 'chocolate_kiss'}, {'id': 12860, 'synset': 'licorice.n.02', 'name': 'licorice'}, {'id': 12861, 'synset': 'life_saver.n.01', 'name': 'Life_Saver'}, {'id': 12862, 'synset': 'lozenge.n.01', 'name': 'lozenge'}, {'id': 12863, 'synset': 'cachou.n.01', 'name': 'cachou'}, {'id': 12864, 'synset': 'cough_drop.n.01', 'name': 'cough_drop'}, {'id': 12865, 'synset': 'marshmallow.n.01', 'name': 'marshmallow'}, {'id': 12866, 'synset': 'marzipan.n.01', 'name': 'marzipan'}, {'id': 12867, 'synset': 'nougat.n.01', 'name': 'nougat'}, {'id': 12868, 'synset': 'nougat_bar.n.01', 'name': 'nougat_bar'}, {'id': 12869, 'synset': 'nut_bar.n.01', 'name': 'nut_bar'}, {'id': 12870, 'synset': 'peanut_bar.n.01', 'name': 'peanut_bar'}, {'id': 12871, 'synset': 'popcorn_ball.n.01', 'name': 'popcorn_ball'}, {'id': 12872, 'synset': 'praline.n.01', 'name': 'praline'}, {'id': 12873, 'synset': 'rock_candy.n.02', 'name': 'rock_candy'}, {'id': 12874, 'synset': 'rock_candy.n.01', 'name': 'rock_candy'}, {'id': 12875, 'synset': 'sugar_candy.n.01', 'name': 'sugar_candy'}, {'id': 12876, 'synset': 'sugarplum.n.01', 'name': 'sugarplum'}, {'id': 12877, 'synset': 'taffy.n.01', 'name': 'taffy'}, {'id': 12878, 'synset': 'molasses_taffy.n.01', 'name': 'molasses_taffy'}, {'id': 12879, 'synset': 'turkish_delight.n.01', 'name': 'Turkish_Delight'}, {'id': 12880, 'synset': 'dessert.n.01', 'name': 'dessert'}, {'id': 12881, 'synset': 'ambrosia.n.04', 'name': 'ambrosia'}, {'id': 12882, 'synset': 'ambrosia.n.03', 'name': 'ambrosia'}, {'id': 12883, 'synset': 'baked_alaska.n.01', 'name': 'baked_Alaska'}, {'id': 12884, 'synset': 'blancmange.n.01', 'name': 'blancmange'}, {'id': 12885, 'synset': 'charlotte.n.02', 'name': 'charlotte'}, {'id': 12886, 'synset': 'compote.n.01', 'name': 'compote'}, {'id': 12887, 'synset': 'dumpling.n.02', 'name': 'dumpling'}, {'id': 12888, 'synset': 
'flan.n.01', 'name': 'flan'}, {'id': 12889, 'synset': 'frozen_dessert.n.01', 'name': 'frozen_dessert'}, {'id': 12890, 'synset': 'junket.n.01', 'name': 'junket'}, {'id': 12891, 'synset': 'mousse.n.02', 'name': 'mousse'}, {'id': 12892, 'synset': 'mousse.n.01', 'name': 'mousse'}, {'id': 12893, 'synset': 'pavlova.n.02', 'name': 'pavlova'}, {'id': 12894, 'synset': 'peach_melba.n.01', 'name': 'peach_melba'}, {'id': 12895, 'synset': 'whip.n.03', 'name': 'whip'}, {'id': 12896, 'synset': 'prune_whip.n.01', 'name': 'prune_whip'}, {'id': 12897, 'synset': 'pudding.n.03', 'name': 'pudding'}, {'id': 12898, 'synset': 'pudding.n.02', 'name': 'pudding'}, {'id': 12899, 'synset': 'syllabub.n.02', 'name': 'syllabub'}, {'id': 12900, 'synset': 'tiramisu.n.01', 'name': 'tiramisu'}, {'id': 12901, 'synset': 'trifle.n.01', 'name': 'trifle'}, {'id': 12902, 'synset': 'tipsy_cake.n.01', 'name': 'tipsy_cake'}, {'id': 12903, 'synset': 'jello.n.01', 'name': 'jello'}, {'id': 12904, 'synset': 'apple_dumpling.n.01', 'name': 'apple_dumpling'}, {'id': 12905, 'synset': 'ice.n.05', 'name': 'ice'}, {'id': 12906, 'synset': 'water_ice.n.02', 'name': 'water_ice'}, {'id': 12907, 'synset': 'ice-cream_cone.n.01', 'name': 'ice-cream_cone'}, {'id': 12908, 'synset': 'chocolate_ice_cream.n.01', 'name': 'chocolate_ice_cream'}, {'id': 12909, 'synset': 'neapolitan_ice_cream.n.01', 'name': 'Neapolitan_ice_cream'}, {'id': 12910, 'synset': 'peach_ice_cream.n.01', 'name': 'peach_ice_cream'}, {'id': 12911, 'synset': 'strawberry_ice_cream.n.01', 'name': 'strawberry_ice_cream'}, {'id': 12912, 'synset': 'tutti-frutti.n.01', 'name': 'tutti-frutti'}, {'id': 12913, 'synset': 'vanilla_ice_cream.n.01', 'name': 'vanilla_ice_cream'}, {'id': 12914, 'synset': 'ice_milk.n.01', 'name': 'ice_milk'}, {'id': 12915, 'synset': 'frozen_yogurt.n.01', 'name': 'frozen_yogurt'}, {'id': 12916, 'synset': 'snowball.n.03', 'name': 'snowball'}, {'id': 12917, 'synset': 'snowball.n.02', 'name': 'snowball'}, {'id': 12918, 'synset': 'parfait.n.01', 'name': 'parfait'}, {'id': 12919, 'synset': 'ice-cream_sundae.n.01', 'name': 'ice-cream_sundae'}, {'id': 12920, 'synset': 'split.n.07', 'name': 'split'}, {'id': 12921, 'synset': 'banana_split.n.01', 'name': 'banana_split'}, {'id': 12922, 'synset': 'frozen_pudding.n.01', 'name': 'frozen_pudding'}, {'id': 12923, 'synset': 'frozen_custard.n.01', 'name': 'frozen_custard'}, {'id': 12924, 'synset': 'flummery.n.01', 'name': 'flummery'}, {'id': 12925, 'synset': 'fish_mousse.n.01', 'name': 'fish_mousse'}, {'id': 12926, 'synset': 'chicken_mousse.n.01', 'name': 'chicken_mousse'}, {'id': 12927, 'synset': 'plum_pudding.n.01', 'name': 'plum_pudding'}, {'id': 12928, 'synset': 'carrot_pudding.n.01', 'name': 'carrot_pudding'}, {'id': 12929, 'synset': 'corn_pudding.n.01', 'name': 'corn_pudding'}, {'id': 12930, 'synset': 'steamed_pudding.n.01', 'name': 'steamed_pudding'}, {'id': 12931, 'synset': 'duff.n.01', 'name': 'duff'}, {'id': 12932, 'synset': 'vanilla_pudding.n.01', 'name': 'vanilla_pudding'}, {'id': 12933, 'synset': 'chocolate_pudding.n.01', 'name': 'chocolate_pudding'}, {'id': 12934, 'synset': 'brown_betty.n.01', 'name': 'brown_Betty'}, {'id': 12935, 'synset': 'nesselrode.n.01', 'name': 'Nesselrode'}, {'id': 12936, 'synset': 'pease_pudding.n.01', 'name': 'pease_pudding'}, {'id': 12937, 'synset': 'custard.n.01', 'name': 'custard'}, {'id': 12938, 'synset': 'creme_caramel.n.01', 'name': 'creme_caramel'}, {'id': 12939, 'synset': 'creme_anglais.n.01', 'name': 'creme_anglais'}, {'id': 12940, 'synset': 'creme_brulee.n.01', 'name': 'creme_brulee'}, 
{'id': 12941, 'synset': 'fruit_custard.n.01', 'name': 'fruit_custard'}, {'id': 12942, 'synset': 'tapioca.n.01', 'name': 'tapioca'}, {'id': 12943, 'synset': 'tapioca_pudding.n.01', 'name': 'tapioca_pudding'}, {'id': 12944, 'synset': 'roly-poly.n.02', 'name': 'roly-poly'}, {'id': 12945, 'synset': 'suet_pudding.n.01', 'name': 'suet_pudding'}, {'id': 12946, 'synset': 'bavarian_cream.n.01', 'name': 'Bavarian_cream'}, {'id': 12947, 'synset': 'maraschino.n.02', 'name': 'maraschino'}, {'id': 12948, 'synset': 'nonpareil.n.02', 'name': 'nonpareil'}, {'id': 12949, 'synset': 'zabaglione.n.01', 'name': 'zabaglione'}, {'id': 12950, 'synset': 'garnish.n.01', 'name': 'garnish'}, {'id': 12951, 'synset': 'pastry.n.01', 'name': 'pastry'}, {'id': 12952, 'synset': 'turnover.n.02', 'name': 'turnover'}, {'id': 12953, 'synset': 'apple_turnover.n.01', 'name': 'apple_turnover'}, {'id': 12954, 'synset': 'knish.n.01', 'name': 'knish'}, {'id': 12955, 'synset': 'pirogi.n.01', 'name': 'pirogi'}, {'id': 12956, 'synset': 'samosa.n.01', 'name': 'samosa'}, {'id': 12957, 'synset': 'timbale.n.01', 'name': 'timbale'}, {'id': 12958, 'synset': 'puff_paste.n.01', 'name': 'puff_paste'}, {'id': 12959, 'synset': 'phyllo.n.01', 'name': 'phyllo'}, {'id': 12960, 'synset': 'puff_batter.n.01', 'name': 'puff_batter'}, {'id': 12961, 'synset': 'ice-cream_cake.n.01', 'name': 'ice-cream_cake'}, {'id': 12962, 'synset': 'fish_cake.n.01', 'name': 'fish_cake'}, {'id': 12963, 'synset': 'fish_stick.n.01', 'name': 'fish_stick'}, {'id': 12964, 'synset': 'conserve.n.01', 'name': 'conserve'}, {'id': 12965, 'synset': 'apple_butter.n.01', 'name': 'apple_butter'}, {'id': 12966, 'synset': 'chowchow.n.02', 'name': 'chowchow'}, {'id': 12967, 'synset': 'lemon_curd.n.01', 'name': 'lemon_curd'}, {'id': 12968, 'synset': 'strawberry_jam.n.01', 'name': 'strawberry_jam'}, {'id': 12969, 'synset': 'jelly.n.02', 'name': 'jelly'}, {'id': 12970, 'synset': 'apple_jelly.n.01', 'name': 'apple_jelly'}, {'id': 12971, 'synset': 'crabapple_jelly.n.01', 'name': 'crabapple_jelly'}, {'id': 12972, 'synset': 'grape_jelly.n.01', 'name': 'grape_jelly'}, {'id': 12973, 'synset': 'marmalade.n.01', 'name': 'marmalade'}, {'id': 12974, 'synset': 'orange_marmalade.n.01', 'name': 'orange_marmalade'}, {'id': 12975, 'synset': 'gelatin_dessert.n.01', 'name': 'gelatin_dessert'}, {'id': 12976, 'synset': 'buffalo_wing.n.01', 'name': 'buffalo_wing'}, {'id': 12977, 'synset': 'barbecued_wing.n.01', 'name': 'barbecued_wing'}, {'id': 12978, 'synset': 'mess.n.03', 'name': 'mess'}, {'id': 12979, 'synset': 'mince.n.01', 'name': 'mince'}, {'id': 12980, 'synset': 'puree.n.01', 'name': 'puree'}, {'id': 12981, 'synset': 'barbecue.n.01', 'name': 'barbecue'}, {'id': 12982, 'synset': 'biryani.n.01', 'name': 'biryani'}, {'id': 12983, 'synset': 'escalope_de_veau_orloff.n.01', 'name': 'escalope_de_veau_Orloff'}, {'id': 12984, 'synset': 'saute.n.01', 'name': 'saute'}, {'id': 12985, 'synset': 'veal_parmesan.n.01', 'name': 'veal_parmesan'}, {'id': 12986, 'synset': 'veal_cordon_bleu.n.01', 'name': 'veal_cordon_bleu'}, {'id': 12987, 'synset': 'margarine.n.01', 'name': 'margarine'}, {'id': 12988, 'synset': 'mincemeat.n.01', 'name': 'mincemeat'}, {'id': 12989, 'synset': 'stuffing.n.01', 'name': 'stuffing'}, {'id': 12990, 'synset': 'turkey_stuffing.n.01', 'name': 'turkey_stuffing'}, {'id': 12991, 'synset': 'oyster_stuffing.n.01', 'name': 'oyster_stuffing'}, {'id': 12992, 'synset': 'forcemeat.n.01', 'name': 'forcemeat'}, {'id': 12993, 'synset': 'anadama_bread.n.01', 'name': 'anadama_bread'}, {'id': 12994, 'synset': 
'bap.n.01', 'name': 'bap'}, {'id': 12995, 'synset': 'barmbrack.n.01', 'name': 'barmbrack'}, {'id': 12996, 'synset': 'breadstick.n.01', 'name': 'breadstick'}, {'id': 12997, 'synset': 'grissino.n.01', 'name': 'grissino'}, {'id': 12998, 'synset': 'brown_bread.n.02', 'name': 'brown_bread'}, {'id': 12999, 'synset': 'tea_bread.n.01', 'name': 'tea_bread'}, {'id': 13000, 'synset': 'caraway_seed_bread.n.01', 'name': 'caraway_seed_bread'}, {'id': 13001, 'synset': 'challah.n.01', 'name': 'challah'}, {'id': 13002, 'synset': 'cinnamon_bread.n.01', 'name': 'cinnamon_bread'}, {'id': 13003, 'synset': 'cracked-wheat_bread.n.01', 'name': 'cracked-wheat_bread'}, {'id': 13004, 'synset': 'dark_bread.n.01', 'name': 'dark_bread'}, {'id': 13005, 'synset': 'english_muffin.n.01', 'name': 'English_muffin'}, {'id': 13006, 'synset': 'flatbread.n.01', 'name': 'flatbread'}, {'id': 13007, 'synset': 'garlic_bread.n.01', 'name': 'garlic_bread'}, {'id': 13008, 'synset': 'gluten_bread.n.01', 'name': 'gluten_bread'}, {'id': 13009, 'synset': 'graham_bread.n.01', 'name': 'graham_bread'}, {'id': 13010, 'synset': 'host.n.09', 'name': 'Host'}, {'id': 13011, 'synset': 'flatbrod.n.01', 'name': 'flatbrod'}, {'id': 13012, 'synset': 'bannock.n.01', 'name': 'bannock'}, {'id': 13013, 'synset': 'chapatti.n.01', 'name': 'chapatti'}, {'id': 13014, 'synset': 'loaf_of_bread.n.01', 'name': 'loaf_of_bread'}, {'id': 13015, 'synset': 'french_loaf.n.01', 'name': 'French_loaf'}, {'id': 13016, 'synset': 'matzo.n.01', 'name': 'matzo'}, {'id': 13017, 'synset': 'nan.n.04', 'name': 'nan'}, {'id': 13018, 'synset': 'onion_bread.n.01', 'name': 'onion_bread'}, {'id': 13019, 'synset': 'raisin_bread.n.01', 'name': 'raisin_bread'}, {'id': 13020, 'synset': 'quick_bread.n.01', 'name': 'quick_bread'}, {'id': 13021, 'synset': 'banana_bread.n.01', 'name': 'banana_bread'}, {'id': 13022, 'synset': 'date_bread.n.01', 'name': 'date_bread'}, {'id': 13023, 'synset': 'date-nut_bread.n.01', 'name': 'date-nut_bread'}, {'id': 13024, 'synset': 'nut_bread.n.01', 'name': 'nut_bread'}, {'id': 13025, 'synset': 'oatcake.n.01', 'name': 'oatcake'}, {'id': 13026, 'synset': 'irish_soda_bread.n.01', 'name': 'Irish_soda_bread'}, {'id': 13027, 'synset': 'skillet_bread.n.01', 'name': 'skillet_bread'}, {'id': 13028, 'synset': 'rye_bread.n.01', 'name': 'rye_bread'}, {'id': 13029, 'synset': 'black_bread.n.01', 'name': 'black_bread'}, {'id': 13030, 'synset': 'jewish_rye_bread.n.01', 'name': 'Jewish_rye_bread'}, {'id': 13031, 'synset': 'limpa.n.01', 'name': 'limpa'}, {'id': 13032, 'synset': 'swedish_rye_bread.n.01', 'name': 'Swedish_rye_bread'}, {'id': 13033, 'synset': 'salt-rising_bread.n.01', 'name': 'salt-rising_bread'}, {'id': 13034, 'synset': 'simnel.n.01', 'name': 'simnel'}, {'id': 13035, 'synset': 'sour_bread.n.01', 'name': 'sour_bread'}, {'id': 13036, 'synset': 'wafer.n.03', 'name': 'wafer'}, {'id': 13037, 'synset': 'white_bread.n.01', 'name': 'white_bread'}, {'id': 13038, 'synset': 'french_bread.n.01', 'name': 'French_bread'}, {'id': 13039, 'synset': 'italian_bread.n.01', 'name': 'Italian_bread'}, {'id': 13040, 'synset': 'corn_cake.n.01', 'name': 'corn_cake'}, {'id': 13041, 'synset': 'skillet_corn_bread.n.01', 'name': 'skillet_corn_bread'}, {'id': 13042, 'synset': 'ashcake.n.01', 'name': 'ashcake'}, {'id': 13043, 'synset': 'hoecake.n.01', 'name': 'hoecake'}, {'id': 13044, 'synset': 'cornpone.n.01', 'name': 'cornpone'}, {'id': 13045, 'synset': 'corn_dab.n.01', 'name': 'corn_dab'}, {'id': 13046, 'synset': 'hush_puppy.n.01', 'name': 'hush_puppy'}, {'id': 13047, 'synset': 
'johnnycake.n.01', 'name': 'johnnycake'}, {'id': 13048, 'synset': 'shawnee_cake.n.01', 'name': 'Shawnee_cake'}, {'id': 13049, 'synset': 'spoon_bread.n.01', 'name': 'spoon_bread'}, {'id': 13050, 'synset': 'cinnamon_toast.n.01', 'name': 'cinnamon_toast'}, {'id': 13051, 'synset': 'orange_toast.n.01', 'name': 'orange_toast'}, {'id': 13052, 'synset': 'melba_toast.n.01', 'name': 'Melba_toast'}, {'id': 13053, 'synset': 'zwieback.n.01', 'name': 'zwieback'}, {'id': 13054, 'synset': 'frankfurter_bun.n.01', 'name': 'frankfurter_bun'}, {'id': 13055, 'synset': 'hamburger_bun.n.01', 'name': 'hamburger_bun'}, {'id': 13056, 'synset': 'bran_muffin.n.01', 'name': 'bran_muffin'}, {'id': 13057, 'synset': 'corn_muffin.n.01', 'name': 'corn_muffin'}, {'id': 13058, 'synset': 'yorkshire_pudding.n.01', 'name': 'Yorkshire_pudding'}, {'id': 13059, 'synset': 'popover.n.01', 'name': 'popover'}, {'id': 13060, 'synset': 'scone.n.01', 'name': 'scone'}, {'id': 13061, 'synset': 'drop_scone.n.01', 'name': 'drop_scone'}, {'id': 13062, 'synset': 'cross_bun.n.01', 'name': 'cross_bun'}, {'id': 13063, 'synset': 'brioche.n.01', 'name': 'brioche'}, {'id': 13064, 'synset': 'hard_roll.n.01', 'name': 'hard_roll'}, {'id': 13065, 'synset': 'soft_roll.n.01', 'name': 'soft_roll'}, {'id': 13066, 'synset': 'kaiser_roll.n.01', 'name': 'kaiser_roll'}, {'id': 13067, 'synset': 'parker_house_roll.n.01', 'name': 'Parker_House_roll'}, {'id': 13068, 'synset': 'clover-leaf_roll.n.01', 'name': 'clover-leaf_roll'}, {'id': 13069, 'synset': 'onion_roll.n.01', 'name': 'onion_roll'}, {'id': 13070, 'synset': 'bialy.n.01', 'name': 'bialy'}, {'id': 13071, 'synset': 'sweet_roll.n.01', 'name': 'sweet_roll'}, {'id': 13072, 'synset': 'bear_claw.n.01', 'name': 'bear_claw'}, {'id': 13073, 'synset': 'cinnamon_roll.n.01', 'name': 'cinnamon_roll'}, {'id': 13074, 'synset': 'honey_bun.n.01', 'name': 'honey_bun'}, {'id': 13075, 'synset': 'pinwheel_roll.n.01', 'name': 'pinwheel_roll'}, {'id': 13076, 'synset': 'danish.n.02', 'name': 'danish'}, {'id': 13077, 'synset': 'onion_bagel.n.01', 'name': 'onion_bagel'}, {'id': 13078, 'synset': 'biscuit.n.01', 'name': 'biscuit'}, {'id': 13079, 'synset': 'rolled_biscuit.n.01', 'name': 'rolled_biscuit'}, {'id': 13080, 'synset': 'baking-powder_biscuit.n.01', 'name': 'baking-powder_biscuit'}, {'id': 13081, 'synset': 'buttermilk_biscuit.n.01', 'name': 'buttermilk_biscuit'}, {'id': 13082, 'synset': 'shortcake.n.01', 'name': 'shortcake'}, {'id': 13083, 'synset': 'hardtack.n.01', 'name': 'hardtack'}, {'id': 13084, 'synset': 'saltine.n.01', 'name': 'saltine'}, {'id': 13085, 'synset': 'soda_cracker.n.01', 'name': 'soda_cracker'}, {'id': 13086, 'synset': 'oyster_cracker.n.01', 'name': 'oyster_cracker'}, {'id': 13087, 'synset': 'water_biscuit.n.01', 'name': 'water_biscuit'}, {'id': 13088, 'synset': 'graham_cracker.n.01', 'name': 'graham_cracker'}, {'id': 13089, 'synset': 'soft_pretzel.n.01', 'name': 'soft_pretzel'}, {'id': 13090, 'synset': 'sandwich_plate.n.01', 'name': 'sandwich_plate'}, {'id': 13091, 'synset': 'butty.n.01', 'name': 'butty'}, {'id': 13092, 'synset': 'ham_sandwich.n.01', 'name': 'ham_sandwich'}, {'id': 13093, 'synset': 'chicken_sandwich.n.01', 'name': 'chicken_sandwich'}, {'id': 13094, 'synset': 'club_sandwich.n.01', 'name': 'club_sandwich'}, {'id': 13095, 'synset': 'open-face_sandwich.n.01', 'name': 'open-face_sandwich'}, {'id': 13096, 'synset': 'cheeseburger.n.01', 'name': 'cheeseburger'}, {'id': 13097, 'synset': 'tunaburger.n.01', 'name': 'tunaburger'}, {'id': 13098, 'synset': 'hotdog.n.02', 'name': 'hotdog'}, {'id': 13099, 
'synset': 'sloppy_joe.n.01', 'name': 'Sloppy_Joe'}, {'id': 13100, 'synset': 'bomber.n.03', 'name': 'bomber'}, {'id': 13101, 'synset': 'gyro.n.01', 'name': 'gyro'}, {'id': 13102, 'synset': 'bacon-lettuce-tomato_sandwich.n.01', 'name': 'bacon-lettuce-tomato_sandwich'}, {'id': 13103, 'synset': 'reuben.n.02', 'name': 'Reuben'}, {'id': 13104, 'synset': 'western.n.02', 'name': 'western'}, {'id': 13105, 'synset': 'wrap.n.02', 'name': 'wrap'}, {'id': 13106, 'synset': 'spaghetti.n.01', 'name': 'spaghetti'}, {'id': 13107, 'synset': 'hasty_pudding.n.01', 'name': 'hasty_pudding'}, {'id': 13108, 'synset': 'gruel.n.01', 'name': 'gruel'}, {'id': 13109, 'synset': 'congee.n.01', 'name': 'congee'}, {'id': 13110, 'synset': 'skilly.n.01', 'name': 'skilly'}, {'id': 13111, 'synset': 'edible_fruit.n.01', 'name': 'edible_fruit'}, {'id': 13112, 'synset': 'vegetable.n.01', 'name': 'vegetable'}, {'id': 13113, 'synset': 'julienne.n.01', 'name': 'julienne'}, {'id': 13114, 'synset': 'raw_vegetable.n.01', 'name': 'raw_vegetable'}, {'id': 13115, 'synset': 'crudites.n.01', 'name': 'crudites'}, {'id': 13116, 'synset': 'celery_stick.n.01', 'name': 'celery_stick'}, {'id': 13117, 'synset': 'legume.n.03', 'name': 'legume'}, {'id': 13118, 'synset': 'pulse.n.04', 'name': 'pulse'}, {'id': 13119, 'synset': 'potherb.n.01', 'name': 'potherb'}, {'id': 13120, 'synset': 'greens.n.01', 'name': 'greens'}, {'id': 13121, 'synset': 'chop-suey_greens.n.02', 'name': 'chop-suey_greens'}, {'id': 13122, 'synset': 'solanaceous_vegetable.n.01', 'name': 'solanaceous_vegetable'}, {'id': 13123, 'synset': 'root_vegetable.n.01', 'name': 'root_vegetable'}, {'id': 13124, 'synset': 'baked_potato.n.01', 'name': 'baked_potato'}, {'id': 13125, 'synset': 'french_fries.n.01', 'name': 'french_fries'}, {'id': 13126, 'synset': 'home_fries.n.01', 'name': 'home_fries'}, {'id': 13127, 'synset': 'jacket_potato.n.01', 'name': 'jacket_potato'}, {'id': 13128, 'synset': 'potato_skin.n.01', 'name': 'potato_skin'}, {'id': 13129, 'synset': 'uruguay_potato.n.02', 'name': 'Uruguay_potato'}, {'id': 13130, 'synset': 'yam.n.04', 'name': 'yam'}, {'id': 13131, 'synset': 'yam.n.03', 'name': 'yam'}, {'id': 13132, 'synset': 'snack_food.n.01', 'name': 'snack_food'}, {'id': 13133, 'synset': 'corn_chip.n.01', 'name': 'corn_chip'}, {'id': 13134, 'synset': 'tortilla_chip.n.01', 'name': 'tortilla_chip'}, {'id': 13135, 'synset': 'nacho.n.01', 'name': 'nacho'}, {'id': 13136, 'synset': 'pieplant.n.01', 'name': 'pieplant'}, {'id': 13137, 'synset': 'cruciferous_vegetable.n.01', 'name': 'cruciferous_vegetable'}, {'id': 13138, 'synset': 'mustard.n.03', 'name': 'mustard'}, {'id': 13139, 'synset': 'cabbage.n.01', 'name': 'cabbage'}, {'id': 13140, 'synset': 'kale.n.03', 'name': 'kale'}, {'id': 13141, 'synset': 'collards.n.01', 'name': 'collards'}, {'id': 13142, 'synset': 'chinese_cabbage.n.02', 'name': 'Chinese_cabbage'}, {'id': 13143, 'synset': 'bok_choy.n.02', 'name': 'bok_choy'}, {'id': 13144, 'synset': 'head_cabbage.n.02', 'name': 'head_cabbage'}, {'id': 13145, 'synset': 'red_cabbage.n.02', 'name': 'red_cabbage'}, {'id': 13146, 'synset': 'savoy_cabbage.n.02', 'name': 'savoy_cabbage'}, {'id': 13147, 'synset': 'broccoli.n.02', 'name': 'broccoli'}, {'id': 13148, 'synset': 'broccoli_rabe.n.02', 'name': 'broccoli_rabe'}, {'id': 13149, 'synset': 'squash.n.02', 'name': 'squash'}, {'id': 13150, 'synset': 'summer_squash.n.02', 'name': 'summer_squash'}, {'id': 13151, 'synset': 'yellow_squash.n.02', 'name': 'yellow_squash'}, {'id': 13152, 'synset': 'crookneck.n.01', 'name': 'crookneck'}, {'id': 13153, 
'synset': 'marrow.n.04', 'name': 'marrow'}, {'id': 13154, 'synset': 'cocozelle.n.02', 'name': 'cocozelle'}, {'id': 13155, 'synset': 'pattypan_squash.n.02', 'name': 'pattypan_squash'}, {'id': 13156, 'synset': 'spaghetti_squash.n.02', 'name': 'spaghetti_squash'}, {'id': 13157, 'synset': 'winter_squash.n.02', 'name': 'winter_squash'}, {'id': 13158, 'synset': 'acorn_squash.n.02', 'name': 'acorn_squash'}, {'id': 13159, 'synset': 'butternut_squash.n.02', 'name': 'butternut_squash'}, {'id': 13160, 'synset': 'hubbard_squash.n.02', 'name': 'hubbard_squash'}, {'id': 13161, 'synset': 'turban_squash.n.02', 'name': 'turban_squash'}, {'id': 13162, 'synset': 'buttercup_squash.n.02', 'name': 'buttercup_squash'}, {'id': 13163, 'synset': 'cushaw.n.02', 'name': 'cushaw'}, {'id': 13164, 'synset': 'winter_crookneck_squash.n.02', 'name': 'winter_crookneck_squash'}, {'id': 13165, 'synset': 'gherkin.n.02', 'name': 'gherkin'}, {'id': 13166, 'synset': 'artichoke_heart.n.01', 'name': 'artichoke_heart'}, {'id': 13167, 'synset': 'jerusalem_artichoke.n.03', 'name': 'Jerusalem_artichoke'}, {'id': 13168, 'synset': 'bamboo_shoot.n.01', 'name': 'bamboo_shoot'}, {'id': 13169, 'synset': 'sprout.n.02', 'name': 'sprout'}, {'id': 13170, 'synset': 'bean_sprout.n.01', 'name': 'bean_sprout'}, {'id': 13171, 'synset': 'alfalfa_sprout.n.01', 'name': 'alfalfa_sprout'}, {'id': 13172, 'synset': 'beet.n.02', 'name': 'beet'}, {'id': 13173, 'synset': 'beet_green.n.01', 'name': 'beet_green'}, {'id': 13174, 'synset': 'sugar_beet.n.02', 'name': 'sugar_beet'}, {'id': 13175, 'synset': 'mangel-wurzel.n.02', 'name': 'mangel-wurzel'}, {'id': 13176, 'synset': 'chard.n.02', 'name': 'chard'}, {'id': 13177, 'synset': 'pepper.n.04', 'name': 'pepper'}, {'id': 13178, 'synset': 'sweet_pepper.n.02', 'name': 'sweet_pepper'}, {'id': 13179, 'synset': 'green_pepper.n.01', 'name': 'green_pepper'}, {'id': 13180, 'synset': 'globe_pepper.n.01', 'name': 'globe_pepper'}, {'id': 13181, 'synset': 'pimento.n.02', 'name': 'pimento'}, {'id': 13182, 'synset': 'hot_pepper.n.02', 'name': 'hot_pepper'}, {'id': 13183, 'synset': 'jalapeno.n.02', 'name': 'jalapeno'}, {'id': 13184, 'synset': 'chipotle.n.01', 'name': 'chipotle'}, {'id': 13185, 'synset': 'cayenne.n.03', 'name': 'cayenne'}, {'id': 13186, 'synset': 'tabasco.n.03', 'name': 'tabasco'}, {'id': 13187, 'synset': 'onion.n.03', 'name': 'onion'}, {'id': 13188, 'synset': 'bermuda_onion.n.01', 'name': 'Bermuda_onion'}, {'id': 13189, 'synset': 'vidalia_onion.n.01', 'name': 'Vidalia_onion'}, {'id': 13190, 'synset': 'spanish_onion.n.01', 'name': 'Spanish_onion'}, {'id': 13191, 'synset': 'purple_onion.n.01', 'name': 'purple_onion'}, {'id': 13192, 'synset': 'leek.n.02', 'name': 'leek'}, {'id': 13193, 'synset': 'shallot.n.03', 'name': 'shallot'}, {'id': 13194, 'synset': 'salad_green.n.01', 'name': 'salad_green'}, {'id': 13195, 'synset': 'lettuce.n.03', 'name': 'lettuce'}, {'id': 13196, 'synset': 'butterhead_lettuce.n.01', 'name': 'butterhead_lettuce'}, {'id': 13197, 'synset': 'buttercrunch.n.01', 'name': 'buttercrunch'}, {'id': 13198, 'synset': 'bibb_lettuce.n.01', 'name': 'Bibb_lettuce'}, {'id': 13199, 'synset': 'boston_lettuce.n.01', 'name': 'Boston_lettuce'}, {'id': 13200, 'synset': 'crisphead_lettuce.n.01', 'name': 'crisphead_lettuce'}, {'id': 13201, 'synset': 'cos.n.02', 'name': 'cos'}, {'id': 13202, 'synset': 'leaf_lettuce.n.02', 'name': 'leaf_lettuce'}, {'id': 13203, 'synset': 'celtuce.n.02', 'name': 'celtuce'}, {'id': 13204, 'synset': 'bean.n.01', 'name': 'bean'}, {'id': 13205, 'synset': 'goa_bean.n.02', 'name': 
'goa_bean'}, {'id': 13206, 'synset': 'lentil.n.01', 'name': 'lentil'}, {'id': 13207, 'synset': 'green_pea.n.01', 'name': 'green_pea'}, {'id': 13208, 'synset': 'marrowfat_pea.n.01', 'name': 'marrowfat_pea'}, {'id': 13209, 'synset': 'snow_pea.n.02', 'name': 'snow_pea'}, {'id': 13210, 'synset': 'sugar_snap_pea.n.02', 'name': 'sugar_snap_pea'}, {'id': 13211, 'synset': 'split-pea.n.01', 'name': 'split-pea'}, {'id': 13212, 'synset': 'chickpea.n.03', 'name': 'chickpea'}, {'id': 13213, 'synset': 'cajan_pea.n.02', 'name': 'cajan_pea'}, {'id': 13214, 'synset': 'field_pea.n.03', 'name': 'field_pea'}, {'id': 13215, 'synset': 'mushy_peas.n.01', 'name': 'mushy_peas'}, {'id': 13216, 'synset': 'black-eyed_pea.n.03', 'name': 'black-eyed_pea'}, {'id': 13217, 'synset': 'common_bean.n.02', 'name': 'common_bean'}, {'id': 13218, 'synset': 'kidney_bean.n.02', 'name': 'kidney_bean'}, {'id': 13219, 'synset': 'navy_bean.n.01', 'name': 'navy_bean'}, {'id': 13220, 'synset': 'pinto_bean.n.01', 'name': 'pinto_bean'}, {'id': 13221, 'synset': 'frijole.n.02', 'name': 'frijole'}, {'id': 13222, 'synset': 'black_bean.n.01', 'name': 'black_bean'}, {'id': 13223, 'synset': 'fresh_bean.n.01', 'name': 'fresh_bean'}, {'id': 13224, 'synset': 'flageolet.n.01', 'name': 'flageolet'}, {'id': 13225, 'synset': 'green_bean.n.01', 'name': 'green_bean'}, {'id': 13226, 'synset': 'snap_bean.n.01', 'name': 'snap_bean'}, {'id': 13227, 'synset': 'string_bean.n.01', 'name': 'string_bean'}, {'id': 13228, 'synset': 'kentucky_wonder.n.01', 'name': 'Kentucky_wonder'}, {'id': 13229, 'synset': 'scarlet_runner.n.03', 'name': 'scarlet_runner'}, {'id': 13230, 'synset': 'haricot_vert.n.01', 'name': 'haricot_vert'}, {'id': 13231, 'synset': 'wax_bean.n.02', 'name': 'wax_bean'}, {'id': 13232, 'synset': 'shell_bean.n.02', 'name': 'shell_bean'}, {'id': 13233, 'synset': 'lima_bean.n.03', 'name': 'lima_bean'}, {'id': 13234, 'synset': 'fordhooks.n.01', 'name': 'Fordhooks'}, {'id': 13235, 'synset': 'sieva_bean.n.02', 'name': 'sieva_bean'}, {'id': 13236, 'synset': 'fava_bean.n.02', 'name': 'fava_bean'}, {'id': 13237, 'synset': 'soy.n.04', 'name': 'soy'}, {'id': 13238, 'synset': 'green_soybean.n.01', 'name': 'green_soybean'}, {'id': 13239, 'synset': 'field_soybean.n.01', 'name': 'field_soybean'}, {'id': 13240, 'synset': 'cardoon.n.02', 'name': 'cardoon'}, {'id': 13241, 'synset': 'carrot.n.03', 'name': 'carrot'}, {'id': 13242, 'synset': 'carrot_stick.n.01', 'name': 'carrot_stick'}, {'id': 13243, 'synset': 'celery.n.02', 'name': 'celery'}, {'id': 13244, 'synset': 'pascal_celery.n.01', 'name': 'pascal_celery'}, {'id': 13245, 'synset': 'celeriac.n.02', 'name': 'celeriac'}, {'id': 13246, 'synset': 'chicory.n.04', 'name': 'chicory'}, {'id': 13247, 'synset': 'radicchio.n.01', 'name': 'radicchio'}, {'id': 13248, 'synset': 'coffee_substitute.n.01', 'name': 'coffee_substitute'}, {'id': 13249, 'synset': 'chicory.n.03', 'name': 'chicory'}, {'id': 13250, 'synset': 'postum.n.01', 'name': 'Postum'}, {'id': 13251, 'synset': 'chicory_escarole.n.01', 'name': 'chicory_escarole'}, {'id': 13252, 'synset': 'belgian_endive.n.01', 'name': 'Belgian_endive'}, {'id': 13253, 'synset': 'sweet_corn.n.02', 'name': 'sweet_corn'}, {'id': 13254, 'synset': 'hominy.n.01', 'name': 'hominy'}, {'id': 13255, 'synset': 'lye_hominy.n.01', 'name': 'lye_hominy'}, {'id': 13256, 'synset': 'pearl_hominy.n.01', 'name': 'pearl_hominy'}, {'id': 13257, 'synset': 'popcorn.n.02', 'name': 'popcorn'}, {'id': 13258, 'synset': 'cress.n.02', 'name': 'cress'}, {'id': 13259, 'synset': 'watercress.n.02', 'name': 'watercress'}, 
{'id': 13260, 'synset': 'garden_cress.n.01', 'name': 'garden_cress'}, {'id': 13261, 'synset': 'winter_cress.n.02', 'name': 'winter_cress'}, {'id': 13262, 'synset': 'dandelion_green.n.02', 'name': 'dandelion_green'}, {'id': 13263, 'synset': 'gumbo.n.03', 'name': 'gumbo'}, {'id': 13264, 'synset': 'kohlrabi.n.02', 'name': 'kohlrabi'}, {'id': 13265, 'synset': "lamb's-quarter.n.01", 'name': "lamb's-quarter"}, {'id': 13266, 'synset': 'wild_spinach.n.03', 'name': 'wild_spinach'}, {'id': 13267, 'synset': 'beefsteak_tomato.n.01', 'name': 'beefsteak_tomato'}, {'id': 13268, 'synset': 'cherry_tomato.n.02', 'name': 'cherry_tomato'}, {'id': 13269, 'synset': 'plum_tomato.n.02', 'name': 'plum_tomato'}, {'id': 13270, 'synset': 'tomatillo.n.03', 'name': 'tomatillo'}, {'id': 13271, 'synset': 'mushroom.n.05', 'name': 'mushroom'}, {'id': 13272, 'synset': 'stuffed_mushroom.n.01', 'name': 'stuffed_mushroom'}, {'id': 13273, 'synset': 'salsify.n.03', 'name': 'salsify'}, {'id': 13274, 'synset': 'oyster_plant.n.03', 'name': 'oyster_plant'}, {'id': 13275, 'synset': 'scorzonera.n.02', 'name': 'scorzonera'}, {'id': 13276, 'synset': 'parsnip.n.03', 'name': 'parsnip'}, {'id': 13277, 'synset': 'radish.n.01', 'name': 'radish'}, {'id': 13278, 'synset': 'turnip.n.02', 'name': 'turnip'}, {'id': 13279, 'synset': 'white_turnip.n.02', 'name': 'white_turnip'}, {'id': 13280, 'synset': 'rutabaga.n.01', 'name': 'rutabaga'}, {'id': 13281, 'synset': 'turnip_greens.n.01', 'name': 'turnip_greens'}, {'id': 13282, 'synset': 'sorrel.n.04', 'name': 'sorrel'}, {'id': 13283, 'synset': 'french_sorrel.n.02', 'name': 'French_sorrel'}, {'id': 13284, 'synset': 'spinach.n.02', 'name': 'spinach'}, {'id': 13285, 'synset': 'taro.n.03', 'name': 'taro'}, {'id': 13286, 'synset': 'truffle.n.02', 'name': 'truffle'}, {'id': 13287, 'synset': 'edible_nut.n.01', 'name': 'edible_nut'}, {'id': 13288, 'synset': 'bunya_bunya.n.02', 'name': 'bunya_bunya'}, {'id': 13289, 'synset': 'peanut.n.04', 'name': 'peanut'}, {'id': 13290, 'synset': 'freestone.n.01', 'name': 'freestone'}, {'id': 13291, 'synset': 'cling.n.01', 'name': 'cling'}, {'id': 13292, 'synset': 'windfall.n.01', 'name': 'windfall'}, {'id': 13293, 'synset': 'crab_apple.n.03', 'name': 'crab_apple'}, {'id': 13294, 'synset': 'eating_apple.n.01', 'name': 'eating_apple'}, {'id': 13295, 'synset': 'baldwin.n.03', 'name': 'Baldwin'}, {'id': 13296, 'synset': 'cortland.n.01', 'name': 'Cortland'}, {'id': 13297, 'synset': "cox's_orange_pippin.n.01", 'name': "Cox's_Orange_Pippin"}, {'id': 13298, 'synset': 'delicious.n.01', 'name': 'Delicious'}, {'id': 13299, 'synset': 'golden_delicious.n.01', 'name': 'Golden_Delicious'}, {'id': 13300, 'synset': 'red_delicious.n.01', 'name': 'Red_Delicious'}, {'id': 13301, 'synset': 'empire.n.05', 'name': 'Empire'}, {'id': 13302, 'synset': "grimes'_golden.n.01", 'name': "Grimes'_golden"}, {'id': 13303, 'synset': 'jonathan.n.01', 'name': 'Jonathan'}, {'id': 13304, 'synset': 'mcintosh.n.01', 'name': 'McIntosh'}, {'id': 13305, 'synset': 'macoun.n.01', 'name': 'Macoun'}, {'id': 13306, 'synset': 'northern_spy.n.01', 'name': 'Northern_Spy'}, {'id': 13307, 'synset': 'pearmain.n.01', 'name': 'Pearmain'}, {'id': 13308, 'synset': 'pippin.n.01', 'name': 'Pippin'}, {'id': 13309, 'synset': 'prima.n.01', 'name': 'Prima'}, {'id': 13310, 'synset': 'stayman.n.01', 'name': 'Stayman'}, {'id': 13311, 'synset': 'winesap.n.01', 'name': 'Winesap'}, {'id': 13312, 'synset': 'stayman_winesap.n.01', 'name': 'Stayman_Winesap'}, {'id': 13313, 'synset': 'cooking_apple.n.01', 'name': 'cooking_apple'}, {'id': 13314, 
'synset': "bramley's_seedling.n.01", 'name': "Bramley's_Seedling"}, {'id': 13315, 'synset': 'granny_smith.n.01', 'name': 'Granny_Smith'}, {'id': 13316, 'synset': "lane's_prince_albert.n.01", 'name': "Lane's_Prince_Albert"}, {'id': 13317, 'synset': 'newtown_wonder.n.01', 'name': 'Newtown_Wonder'}, {'id': 13318, 'synset': 'rome_beauty.n.01', 'name': 'Rome_Beauty'}, {'id': 13319, 'synset': 'berry.n.01', 'name': 'berry'}, {'id': 13320, 'synset': 'bilberry.n.03', 'name': 'bilberry'}, {'id': 13321, 'synset': 'huckleberry.n.03', 'name': 'huckleberry'}, {'id': 13322, 'synset': 'wintergreen.n.03', 'name': 'wintergreen'}, {'id': 13323, 'synset': 'cranberry.n.02', 'name': 'cranberry'}, {'id': 13324, 'synset': 'lingonberry.n.02', 'name': 'lingonberry'}, {'id': 13325, 'synset': 'currant.n.01', 'name': 'currant'}, {'id': 13326, 'synset': 'gooseberry.n.02', 'name': 'gooseberry'}, {'id': 13327, 'synset': 'black_currant.n.02', 'name': 'black_currant'}, {'id': 13328, 'synset': 'red_currant.n.02', 'name': 'red_currant'}, {'id': 13329, 'synset': 'boysenberry.n.02', 'name': 'boysenberry'}, {'id': 13330, 'synset': 'dewberry.n.02', 'name': 'dewberry'}, {'id': 13331, 'synset': 'loganberry.n.02', 'name': 'loganberry'}, {'id': 13332, 'synset': 'saskatoon.n.02', 'name': 'saskatoon'}, {'id': 13333, 'synset': 'sugarberry.n.02', 'name': 'sugarberry'}, {'id': 13334, 'synset': 'acerola.n.02', 'name': 'acerola'}, {'id': 13335, 'synset': 'carambola.n.02', 'name': 'carambola'}, {'id': 13336, 'synset': 'ceriman.n.02', 'name': 'ceriman'}, {'id': 13337, 'synset': 'carissa_plum.n.01', 'name': 'carissa_plum'}, {'id': 13338, 'synset': 'citrus.n.01', 'name': 'citrus'}, {'id': 13339, 'synset': 'temple_orange.n.02', 'name': 'temple_orange'}, {'id': 13340, 'synset': 'clementine.n.02', 'name': 'clementine'}, {'id': 13341, 'synset': 'satsuma.n.02', 'name': 'satsuma'}, {'id': 13342, 'synset': 'tangerine.n.02', 'name': 'tangerine'}, {'id': 13343, 'synset': 'tangelo.n.02', 'name': 'tangelo'}, {'id': 13344, 'synset': 'bitter_orange.n.02', 'name': 'bitter_orange'}, {'id': 13345, 'synset': 'sweet_orange.n.01', 'name': 'sweet_orange'}, {'id': 13346, 'synset': 'jaffa_orange.n.01', 'name': 'Jaffa_orange'}, {'id': 13347, 'synset': 'navel_orange.n.01', 'name': 'navel_orange'}, {'id': 13348, 'synset': 'valencia_orange.n.01', 'name': 'Valencia_orange'}, {'id': 13349, 'synset': 'kumquat.n.02', 'name': 'kumquat'}, {'id': 13350, 'synset': 'key_lime.n.01', 'name': 'key_lime'}, {'id': 13351, 'synset': 'grapefruit.n.02', 'name': 'grapefruit'}, {'id': 13352, 'synset': 'pomelo.n.02', 'name': 'pomelo'}, {'id': 13353, 'synset': 'citrange.n.02', 'name': 'citrange'}, {'id': 13354, 'synset': 'citron.n.01', 'name': 'citron'}, {'id': 13355, 'synset': 'jordan_almond.n.02', 'name': 'Jordan_almond'}, {'id': 13356, 'synset': 'nectarine.n.02', 'name': 'nectarine'}, {'id': 13357, 'synset': 'pitahaya.n.02', 'name': 'pitahaya'}, {'id': 13358, 'synset': 'plum.n.02', 'name': 'plum'}, {'id': 13359, 'synset': 'damson.n.01', 'name': 'damson'}, {'id': 13360, 'synset': 'greengage.n.01', 'name': 'greengage'}, {'id': 13361, 'synset': 'beach_plum.n.02', 'name': 'beach_plum'}, {'id': 13362, 'synset': 'sloe.n.03', 'name': 'sloe'}, {'id': 13363, 'synset': 'victoria_plum.n.01', 'name': 'Victoria_plum'}, {'id': 13364, 'synset': 'dried_fruit.n.01', 'name': 'dried_fruit'}, {'id': 13365, 'synset': 'dried_apricot.n.01', 'name': 'dried_apricot'}, {'id': 13366, 'synset': 'raisin.n.01', 'name': 'raisin'}, {'id': 13367, 'synset': 'seedless_raisin.n.01', 'name': 'seedless_raisin'}, {'id': 
13368, 'synset': 'seeded_raisin.n.01', 'name': 'seeded_raisin'}, {'id': 13369, 'synset': 'currant.n.03', 'name': 'currant'}, {'id': 13370, 'synset': 'anchovy_pear.n.02', 'name': 'anchovy_pear'}, {'id': 13371, 'synset': 'passion_fruit.n.01', 'name': 'passion_fruit'}, {'id': 13372, 'synset': 'granadilla.n.04', 'name': 'granadilla'}, {'id': 13373, 'synset': 'sweet_calabash.n.02', 'name': 'sweet_calabash'}, {'id': 13374, 'synset': 'bell_apple.n.01', 'name': 'bell_apple'}, {'id': 13375, 'synset': 'breadfruit.n.02', 'name': 'breadfruit'}, {'id': 13376, 'synset': 'jackfruit.n.02', 'name': 'jackfruit'}, {'id': 13377, 'synset': 'cacao_bean.n.01', 'name': 'cacao_bean'}, {'id': 13378, 'synset': 'cocoa.n.02', 'name': 'cocoa'}, {'id': 13379, 'synset': 'canistel.n.02', 'name': 'canistel'}, {'id': 13380, 'synset': 'melon_ball.n.01', 'name': 'melon_ball'}, {'id': 13381, 'synset': 'muskmelon.n.02', 'name': 'muskmelon'}, {'id': 13382, 'synset': 'winter_melon.n.02', 'name': 'winter_melon'}, {'id': 13383, 'synset': 'honeydew.n.01', 'name': 'honeydew'}, {'id': 13384, 'synset': 'persian_melon.n.02', 'name': 'Persian_melon'}, {'id': 13385, 'synset': 'net_melon.n.02', 'name': 'net_melon'}, {'id': 13386, 'synset': 'casaba.n.01', 'name': 'casaba'}, {'id': 13387, 'synset': 'sweet_cherry.n.02', 'name': 'sweet_cherry'}, {'id': 13388, 'synset': 'bing_cherry.n.01', 'name': 'bing_cherry'}, {'id': 13389, 'synset': 'heart_cherry.n.02', 'name': 'heart_cherry'}, {'id': 13390, 'synset': 'blackheart.n.02', 'name': 'blackheart'}, {'id': 13391, 'synset': 'capulin.n.02', 'name': 'capulin'}, {'id': 13392, 'synset': 'sour_cherry.n.03', 'name': 'sour_cherry'}, {'id': 13393, 'synset': 'amarelle.n.02', 'name': 'amarelle'}, {'id': 13394, 'synset': 'morello.n.02', 'name': 'morello'}, {'id': 13395, 'synset': 'cocoa_plum.n.02', 'name': 'cocoa_plum'}, {'id': 13396, 'synset': 'gherkin.n.01', 'name': 'gherkin'}, {'id': 13397, 'synset': 'fox_grape.n.02', 'name': 'fox_grape'}, {'id': 13398, 'synset': 'concord_grape.n.01', 'name': 'Concord_grape'}, {'id': 13399, 'synset': 'catawba.n.02', 'name': 'Catawba'}, {'id': 13400, 'synset': 'muscadine.n.02', 'name': 'muscadine'}, {'id': 13401, 'synset': 'scuppernong.n.01', 'name': 'scuppernong'}, {'id': 13402, 'synset': 'slipskin_grape.n.01', 'name': 'slipskin_grape'}, {'id': 13403, 'synset': 'vinifera_grape.n.02', 'name': 'vinifera_grape'}, {'id': 13404, 'synset': 'emperor.n.02', 'name': 'emperor'}, {'id': 13405, 'synset': 'muscat.n.04', 'name': 'muscat'}, {'id': 13406, 'synset': 'ribier.n.01', 'name': 'ribier'}, {'id': 13407, 'synset': 'sultana.n.01', 'name': 'sultana'}, {'id': 13408, 'synset': 'tokay.n.02', 'name': 'Tokay'}, {'id': 13409, 'synset': 'flame_tokay.n.01', 'name': 'flame_tokay'}, {'id': 13410, 'synset': 'thompson_seedless.n.01', 'name': 'Thompson_Seedless'}, {'id': 13411, 'synset': 'custard_apple.n.02', 'name': 'custard_apple'}, {'id': 13412, 'synset': 'cherimoya.n.02', 'name': 'cherimoya'}, {'id': 13413, 'synset': 'soursop.n.02', 'name': 'soursop'}, {'id': 13414, 'synset': 'sweetsop.n.02', 'name': 'sweetsop'}, {'id': 13415, 'synset': 'ilama.n.02', 'name': 'ilama'}, {'id': 13416, 'synset': 'pond_apple.n.02', 'name': 'pond_apple'}, {'id': 13417, 'synset': 'papaw.n.02', 'name': 'papaw'}, {'id': 13418, 'synset': 'kai_apple.n.01', 'name': 'kai_apple'}, {'id': 13419, 'synset': 'ketembilla.n.02', 'name': 'ketembilla'}, {'id': 13420, 'synset': 'ackee.n.01', 'name': 'ackee'}, {'id': 13421, 'synset': 'durian.n.02', 'name': 'durian'}, {'id': 13422, 'synset': 'feijoa.n.02', 'name': 'feijoa'}, {'id': 
13423, 'synset': 'genip.n.02', 'name': 'genip'}, {'id': 13424, 'synset': 'genipap.n.01', 'name': 'genipap'}, {'id': 13425, 'synset': 'loquat.n.02', 'name': 'loquat'}, {'id': 13426, 'synset': 'mangosteen.n.02', 'name': 'mangosteen'}, {'id': 13427, 'synset': 'mango.n.02', 'name': 'mango'}, {'id': 13428, 'synset': 'sapodilla.n.02', 'name': 'sapodilla'}, {'id': 13429, 'synset': 'sapote.n.02', 'name': 'sapote'}, {'id': 13430, 'synset': 'tamarind.n.02', 'name': 'tamarind'}, {'id': 13431, 'synset': 'elderberry.n.02', 'name': 'elderberry'}, {'id': 13432, 'synset': 'guava.n.03', 'name': 'guava'}, {'id': 13433, 'synset': 'mombin.n.02', 'name': 'mombin'}, {'id': 13434, 'synset': 'hog_plum.n.04', 'name': 'hog_plum'}, {'id': 13435, 'synset': 'hog_plum.n.03', 'name': 'hog_plum'}, {'id': 13436, 'synset': 'jaboticaba.n.02', 'name': 'jaboticaba'}, {'id': 13437, 'synset': 'jujube.n.02', 'name': 'jujube'}, {'id': 13438, 'synset': 'litchi.n.02', 'name': 'litchi'}, {'id': 13439, 'synset': 'longanberry.n.02', 'name': 'longanberry'}, {'id': 13440, 'synset': 'mamey.n.02', 'name': 'mamey'}, {'id': 13441, 'synset': 'marang.n.02', 'name': 'marang'}, {'id': 13442, 'synset': 'medlar.n.04', 'name': 'medlar'}, {'id': 13443, 'synset': 'medlar.n.03', 'name': 'medlar'}, {'id': 13444, 'synset': 'mulberry.n.02', 'name': 'mulberry'}, {'id': 13445, 'synset': 'olive.n.04', 'name': 'olive'}, {'id': 13446, 'synset': 'black_olive.n.01', 'name': 'black_olive'}, {'id': 13447, 'synset': 'green_olive.n.01', 'name': 'green_olive'}, {'id': 13448, 'synset': 'bosc.n.01', 'name': 'bosc'}, {'id': 13449, 'synset': 'anjou.n.02', 'name': 'anjou'}, {'id': 13450, 'synset': 'bartlett.n.03', 'name': 'bartlett'}, {'id': 13451, 'synset': 'seckel.n.01', 'name': 'seckel'}, {'id': 13452, 'synset': 'plantain.n.03', 'name': 'plantain'}, {'id': 13453, 'synset': 'plumcot.n.02', 'name': 'plumcot'}, {'id': 13454, 'synset': 'pomegranate.n.02', 'name': 'pomegranate'}, {'id': 13455, 'synset': 'prickly_pear.n.02', 'name': 'prickly_pear'}, {'id': 13456, 'synset': 'barbados_gooseberry.n.02', 'name': 'Barbados_gooseberry'}, {'id': 13457, 'synset': 'quandong.n.04', 'name': 'quandong'}, {'id': 13458, 'synset': 'quandong_nut.n.01', 'name': 'quandong_nut'}, {'id': 13459, 'synset': 'quince.n.02', 'name': 'quince'}, {'id': 13460, 'synset': 'rambutan.n.02', 'name': 'rambutan'}, {'id': 13461, 'synset': 'pulasan.n.02', 'name': 'pulasan'}, {'id': 13462, 'synset': 'rose_apple.n.02', 'name': 'rose_apple'}, {'id': 13463, 'synset': 'sorb.n.01', 'name': 'sorb'}, {'id': 13464, 'synset': 'sour_gourd.n.02', 'name': 'sour_gourd'}, {'id': 13465, 'synset': 'edible_seed.n.01', 'name': 'edible_seed'}, {'id': 13466, 'synset': 'pumpkin_seed.n.01', 'name': 'pumpkin_seed'}, {'id': 13467, 'synset': 'betel_nut.n.01', 'name': 'betel_nut'}, {'id': 13468, 'synset': 'beechnut.n.01', 'name': 'beechnut'}, {'id': 13469, 'synset': 'walnut.n.01', 'name': 'walnut'}, {'id': 13470, 'synset': 'black_walnut.n.02', 'name': 'black_walnut'}, {'id': 13471, 'synset': 'english_walnut.n.02', 'name': 'English_walnut'}, {'id': 13472, 'synset': 'brazil_nut.n.02', 'name': 'brazil_nut'}, {'id': 13473, 'synset': 'butternut.n.02', 'name': 'butternut'}, {'id': 13474, 'synset': 'souari_nut.n.02', 'name': 'souari_nut'}, {'id': 13475, 'synset': 'cashew.n.02', 'name': 'cashew'}, {'id': 13476, 'synset': 'chestnut.n.03', 'name': 'chestnut'}, {'id': 13477, 'synset': 'chincapin.n.01', 'name': 'chincapin'}, {'id': 13478, 'synset': 'hazelnut.n.02', 'name': 'hazelnut'}, {'id': 13479, 'synset': 'coconut_milk.n.02', 'name': 
'coconut_milk'}, {'id': 13480, 'synset': 'grugru_nut.n.01', 'name': 'grugru_nut'}, {'id': 13481, 'synset': 'hickory_nut.n.01', 'name': 'hickory_nut'}, {'id': 13482, 'synset': 'cola_extract.n.01', 'name': 'cola_extract'}, {'id': 13483, 'synset': 'macadamia_nut.n.02', 'name': 'macadamia_nut'}, {'id': 13484, 'synset': 'pecan.n.03', 'name': 'pecan'}, {'id': 13485, 'synset': 'pine_nut.n.01', 'name': 'pine_nut'}, {'id': 13486, 'synset': 'pistachio.n.02', 'name': 'pistachio'}, {'id': 13487, 'synset': 'sunflower_seed.n.01', 'name': 'sunflower_seed'}, {'id': 13488, 'synset': 'anchovy_paste.n.01', 'name': 'anchovy_paste'}, {'id': 13489, 'synset': 'rollmops.n.01', 'name': 'rollmops'}, {'id': 13490, 'synset': 'feed.n.01', 'name': 'feed'}, {'id': 13491, 'synset': 'cattle_cake.n.01', 'name': 'cattle_cake'}, {'id': 13492, 'synset': 'creep_feed.n.01', 'name': 'creep_feed'}, {'id': 13493, 'synset': 'fodder.n.02', 'name': 'fodder'}, {'id': 13494, 'synset': 'feed_grain.n.01', 'name': 'feed_grain'}, {'id': 13495, 'synset': 'eatage.n.01', 'name': 'eatage'}, {'id': 13496, 'synset': 'silage.n.01', 'name': 'silage'}, {'id': 13497, 'synset': 'oil_cake.n.01', 'name': 'oil_cake'}, {'id': 13498, 'synset': 'oil_meal.n.01', 'name': 'oil_meal'}, {'id': 13499, 'synset': 'alfalfa.n.02', 'name': 'alfalfa'}, {'id': 13500, 'synset': 'broad_bean.n.03', 'name': 'broad_bean'}, {'id': 13501, 'synset': 'hay.n.01', 'name': 'hay'}, {'id': 13502, 'synset': 'timothy.n.03', 'name': 'timothy'}, {'id': 13503, 'synset': 'stover.n.01', 'name': 'stover'}, {'id': 13504, 'synset': 'grain.n.02', 'name': 'grain'}, {'id': 13505, 'synset': 'grist.n.01', 'name': 'grist'}, {'id': 13506, 'synset': 'groats.n.01', 'name': 'groats'}, {'id': 13507, 'synset': 'millet.n.03', 'name': 'millet'}, {'id': 13508, 'synset': 'barley.n.01', 'name': 'barley'}, {'id': 13509, 'synset': 'pearl_barley.n.01', 'name': 'pearl_barley'}, {'id': 13510, 'synset': 'buckwheat.n.02', 'name': 'buckwheat'}, {'id': 13511, 'synset': 'bulgur.n.01', 'name': 'bulgur'}, {'id': 13512, 'synset': 'wheat.n.02', 'name': 'wheat'}, {'id': 13513, 'synset': 'cracked_wheat.n.01', 'name': 'cracked_wheat'}, {'id': 13514, 'synset': 'stodge.n.01', 'name': 'stodge'}, {'id': 13515, 'synset': 'wheat_germ.n.01', 'name': 'wheat_germ'}, {'id': 13516, 'synset': 'oat.n.02', 'name': 'oat'}, {'id': 13517, 'synset': 'rice.n.01', 'name': 'rice'}, {'id': 13518, 'synset': 'brown_rice.n.01', 'name': 'brown_rice'}, {'id': 13519, 'synset': 'white_rice.n.01', 'name': 'white_rice'}, {'id': 13520, 'synset': 'wild_rice.n.02', 'name': 'wild_rice'}, {'id': 13521, 'synset': 'paddy.n.03', 'name': 'paddy'}, {'id': 13522, 'synset': 'slop.n.01', 'name': 'slop'}, {'id': 13523, 'synset': 'mash.n.02', 'name': 'mash'}, {'id': 13524, 'synset': 'chicken_feed.n.01', 'name': 'chicken_feed'}, {'id': 13525, 'synset': 'cud.n.01', 'name': 'cud'}, {'id': 13526, 'synset': 'bird_feed.n.01', 'name': 'bird_feed'}, {'id': 13527, 'synset': 'petfood.n.01', 'name': 'petfood'}, {'id': 13528, 'synset': 'dog_food.n.01', 'name': 'dog_food'}, {'id': 13529, 'synset': 'cat_food.n.01', 'name': 'cat_food'}, {'id': 13530, 'synset': 'canary_seed.n.01', 'name': 'canary_seed'}, {'id': 13531, 'synset': 'tossed_salad.n.01', 'name': 'tossed_salad'}, {'id': 13532, 'synset': 'green_salad.n.01', 'name': 'green_salad'}, {'id': 13533, 'synset': 'caesar_salad.n.01', 'name': 'Caesar_salad'}, {'id': 13534, 'synset': 'salmagundi.n.02', 'name': 'salmagundi'}, {'id': 13535, 'synset': 'salad_nicoise.n.01', 'name': 'salad_nicoise'}, {'id': 13536, 'synset': 
'combination_salad.n.01', 'name': 'combination_salad'}, {'id': 13537, 'synset': "chef's_salad.n.01", 'name': "chef's_salad"}, {'id': 13538, 'synset': 'potato_salad.n.01', 'name': 'potato_salad'}, {'id': 13539, 'synset': 'pasta_salad.n.01', 'name': 'pasta_salad'}, {'id': 13540, 'synset': 'macaroni_salad.n.01', 'name': 'macaroni_salad'}, {'id': 13541, 'synset': 'fruit_salad.n.01', 'name': 'fruit_salad'}, {'id': 13542, 'synset': 'waldorf_salad.n.01', 'name': 'Waldorf_salad'}, {'id': 13543, 'synset': 'crab_louis.n.01', 'name': 'crab_Louis'}, {'id': 13544, 'synset': 'herring_salad.n.01', 'name': 'herring_salad'}, {'id': 13545, 'synset': 'tuna_fish_salad.n.01', 'name': 'tuna_fish_salad'}, {'id': 13546, 'synset': 'chicken_salad.n.01', 'name': 'chicken_salad'}, {'id': 13547, 'synset': 'aspic.n.01', 'name': 'aspic'}, {'id': 13548, 'synset': 'molded_salad.n.01', 'name': 'molded_salad'}, {'id': 13549, 'synset': 'tabbouleh.n.01', 'name': 'tabbouleh'}, {'id': 13550, 'synset': 'ingredient.n.03', 'name': 'ingredient'}, {'id': 13551, 'synset': 'flavorer.n.01', 'name': 'flavorer'}, {'id': 13552, 'synset': 'bouillon_cube.n.01', 'name': 'bouillon_cube'}, {'id': 13553, 'synset': 'herb.n.02', 'name': 'herb'}, {'id': 13554, 'synset': 'fines_herbes.n.01', 'name': 'fines_herbes'}, {'id': 13555, 'synset': 'spice.n.02', 'name': 'spice'}, {'id': 13556, 'synset': 'spearmint_oil.n.01', 'name': 'spearmint_oil'}, {'id': 13557, 'synset': 'lemon_oil.n.01', 'name': 'lemon_oil'}, {'id': 13558, 'synset': 'wintergreen_oil.n.01', 'name': 'wintergreen_oil'}, {'id': 13559, 'synset': 'salt.n.02', 'name': 'salt'}, {'id': 13560, 'synset': 'celery_salt.n.01', 'name': 'celery_salt'}, {'id': 13561, 'synset': 'onion_salt.n.01', 'name': 'onion_salt'}, {'id': 13562, 'synset': 'seasoned_salt.n.01', 'name': 'seasoned_salt'}, {'id': 13563, 'synset': 'sour_salt.n.01', 'name': 'sour_salt'}, {'id': 13564, 'synset': 'five_spice_powder.n.01', 'name': 'five_spice_powder'}, {'id': 13565, 'synset': 'allspice.n.03', 'name': 'allspice'}, {'id': 13566, 'synset': 'cinnamon.n.03', 'name': 'cinnamon'}, {'id': 13567, 'synset': 'stick_cinnamon.n.01', 'name': 'stick_cinnamon'}, {'id': 13568, 'synset': 'clove.n.04', 'name': 'clove'}, {'id': 13569, 'synset': 'cumin.n.02', 'name': 'cumin'}, {'id': 13570, 'synset': 'fennel.n.04', 'name': 'fennel'}, {'id': 13571, 'synset': 'ginger.n.02', 'name': 'ginger'}, {'id': 13572, 'synset': 'mace.n.03', 'name': 'mace'}, {'id': 13573, 'synset': 'nutmeg.n.02', 'name': 'nutmeg'}, {'id': 13574, 'synset': 'black_pepper.n.02', 'name': 'black_pepper'}, {'id': 13575, 'synset': 'white_pepper.n.02', 'name': 'white_pepper'}, {'id': 13576, 'synset': 'sassafras.n.02', 'name': 'sassafras'}, {'id': 13577, 'synset': 'basil.n.03', 'name': 'basil'}, {'id': 13578, 'synset': 'bay_leaf.n.01', 'name': 'bay_leaf'}, {'id': 13579, 'synset': 'borage.n.02', 'name': 'borage'}, {'id': 13580, 'synset': 'hyssop.n.02', 'name': 'hyssop'}, {'id': 13581, 'synset': 'caraway.n.02', 'name': 'caraway'}, {'id': 13582, 'synset': 'chervil.n.02', 'name': 'chervil'}, {'id': 13583, 'synset': 'chives.n.02', 'name': 'chives'}, {'id': 13584, 'synset': 'comfrey.n.02', 'name': 'comfrey'}, {'id': 13585, 'synset': 'coriander.n.03', 'name': 'coriander'}, {'id': 13586, 'synset': 'coriander.n.02', 'name': 'coriander'}, {'id': 13587, 'synset': 'costmary.n.02', 'name': 'costmary'}, {'id': 13588, 'synset': 'fennel.n.03', 'name': 'fennel'}, {'id': 13589, 'synset': 'fennel.n.02', 'name': 'fennel'}, {'id': 13590, 'synset': 'fennel_seed.n.01', 'name': 'fennel_seed'}, {'id': 13591, 
'synset': 'fenugreek.n.02', 'name': 'fenugreek'}, {'id': 13592, 'synset': 'clove.n.03', 'name': 'clove'}, {'id': 13593, 'synset': 'garlic_chive.n.02', 'name': 'garlic_chive'}, {'id': 13594, 'synset': 'lemon_balm.n.02', 'name': 'lemon_balm'}, {'id': 13595, 'synset': 'lovage.n.02', 'name': 'lovage'}, {'id': 13596, 'synset': 'marjoram.n.02', 'name': 'marjoram'}, {'id': 13597, 'synset': 'mint.n.04', 'name': 'mint'}, {'id': 13598, 'synset': 'mustard_seed.n.01', 'name': 'mustard_seed'}, {'id': 13599, 'synset': 'mustard.n.02', 'name': 'mustard'}, {'id': 13600, 'synset': 'chinese_mustard.n.02', 'name': 'Chinese_mustard'}, {'id': 13601, 'synset': 'nasturtium.n.03', 'name': 'nasturtium'}, {'id': 13602, 'synset': 'parsley.n.02', 'name': 'parsley'}, {'id': 13603, 'synset': 'salad_burnet.n.02', 'name': 'salad_burnet'}, {'id': 13604, 'synset': 'rosemary.n.02', 'name': 'rosemary'}, {'id': 13605, 'synset': 'rue.n.02', 'name': 'rue'}, {'id': 13606, 'synset': 'sage.n.02', 'name': 'sage'}, {'id': 13607, 'synset': 'clary_sage.n.02', 'name': 'clary_sage'}, {'id': 13608, 'synset': 'savory.n.03', 'name': 'savory'}, {'id': 13609, 'synset': 'summer_savory.n.02', 'name': 'summer_savory'}, {'id': 13610, 'synset': 'winter_savory.n.02', 'name': 'winter_savory'}, {'id': 13611, 'synset': 'sweet_woodruff.n.02', 'name': 'sweet_woodruff'}, {'id': 13612, 'synset': 'sweet_cicely.n.03', 'name': 'sweet_cicely'}, {'id': 13613, 'synset': 'tarragon.n.02', 'name': 'tarragon'}, {'id': 13614, 'synset': 'thyme.n.02', 'name': 'thyme'}, {'id': 13615, 'synset': 'turmeric.n.02', 'name': 'turmeric'}, {'id': 13616, 'synset': 'caper.n.02', 'name': 'caper'}, {'id': 13617, 'synset': 'catsup.n.01', 'name': 'catsup'}, {'id': 13618, 'synset': 'cardamom.n.02', 'name': 'cardamom'}, {'id': 13619, 'synset': 'chili_powder.n.01', 'name': 'chili_powder'}, {'id': 13620, 'synset': 'chili_sauce.n.01', 'name': 'chili_sauce'}, {'id': 13621, 'synset': 'chutney.n.01', 'name': 'chutney'}, {'id': 13622, 'synset': 'steak_sauce.n.01', 'name': 'steak_sauce'}, {'id': 13623, 'synset': 'taco_sauce.n.01', 'name': 'taco_sauce'}, {'id': 13624, 'synset': 'mint_sauce.n.01', 'name': 'mint_sauce'}, {'id': 13625, 'synset': 'cranberry_sauce.n.01', 'name': 'cranberry_sauce'}, {'id': 13626, 'synset': 'curry_powder.n.01', 'name': 'curry_powder'}, {'id': 13627, 'synset': 'curry.n.01', 'name': 'curry'}, {'id': 13628, 'synset': 'lamb_curry.n.01', 'name': 'lamb_curry'}, {'id': 13629, 'synset': 'duck_sauce.n.01', 'name': 'duck_sauce'}, {'id': 13630, 'synset': 'horseradish.n.03', 'name': 'horseradish'}, {'id': 13631, 'synset': 'marinade.n.01', 'name': 'marinade'}, {'id': 13632, 'synset': 'paprika.n.02', 'name': 'paprika'}, {'id': 13633, 'synset': 'spanish_paprika.n.01', 'name': 'Spanish_paprika'}, {'id': 13634, 'synset': 'dill_pickle.n.01', 'name': 'dill_pickle'}, {'id': 13635, 'synset': 'bread_and_butter_pickle.n.01', 'name': 'bread_and_butter_pickle'}, {'id': 13636, 'synset': 'pickle_relish.n.01', 'name': 'pickle_relish'}, {'id': 13637, 'synset': 'piccalilli.n.01', 'name': 'piccalilli'}, {'id': 13638, 'synset': 'sweet_pickle.n.01', 'name': 'sweet_pickle'}, {'id': 13639, 'synset': 'soy_sauce.n.01', 'name': 'soy_sauce'}, {'id': 13640, 'synset': 'tomato_paste.n.01', 'name': 'tomato_paste'}, {'id': 13641, 'synset': 'angelica.n.03', 'name': 'angelica'}, {'id': 13642, 'synset': 'angelica.n.02', 'name': 'angelica'}, {'id': 13643, 'synset': 'almond_extract.n.01', 'name': 'almond_extract'}, {'id': 13644, 'synset': 'anise.n.02', 'name': 'anise'}, {'id': 13645, 'synset': 'chinese_anise.n.02', 
'name': 'Chinese_anise'}, {'id': 13646, 'synset': 'juniper_berries.n.01', 'name': 'juniper_berries'}, {'id': 13647, 'synset': 'saffron.n.02', 'name': 'saffron'}, {'id': 13648, 'synset': 'sesame_seed.n.01', 'name': 'sesame_seed'}, {'id': 13649, 'synset': 'caraway_seed.n.01', 'name': 'caraway_seed'}, {'id': 13650, 'synset': 'poppy_seed.n.01', 'name': 'poppy_seed'}, {'id': 13651, 'synset': 'dill.n.02', 'name': 'dill'}, {'id': 13652, 'synset': 'dill_seed.n.01', 'name': 'dill_seed'}, {'id': 13653, 'synset': 'celery_seed.n.01', 'name': 'celery_seed'}, {'id': 13654, 'synset': 'lemon_extract.n.01', 'name': 'lemon_extract'}, {'id': 13655, 'synset': 'monosodium_glutamate.n.01', 'name': 'monosodium_glutamate'}, {'id': 13656, 'synset': 'vanilla_bean.n.01', 'name': 'vanilla_bean'}, {'id': 13657, 'synset': 'cider_vinegar.n.01', 'name': 'cider_vinegar'}, {'id': 13658, 'synset': 'wine_vinegar.n.01', 'name': 'wine_vinegar'}, {'id': 13659, 'synset': 'sauce.n.01', 'name': 'sauce'}, {'id': 13660, 'synset': 'anchovy_sauce.n.01', 'name': 'anchovy_sauce'}, {'id': 13661, 'synset': 'hard_sauce.n.01', 'name': 'hard_sauce'}, {'id': 13662, 'synset': 'horseradish_sauce.n.01', 'name': 'horseradish_sauce'}, {'id': 13663, 'synset': 'bolognese_pasta_sauce.n.01', 'name': 'bolognese_pasta_sauce'}, {'id': 13664, 'synset': 'carbonara.n.01', 'name': 'carbonara'}, {'id': 13665, 'synset': 'tomato_sauce.n.01', 'name': 'tomato_sauce'}, {'id': 13666, 'synset': 'tartare_sauce.n.01', 'name': 'tartare_sauce'}, {'id': 13667, 'synset': 'wine_sauce.n.01', 'name': 'wine_sauce'}, {'id': 13668, 'synset': 'marchand_de_vin.n.01', 'name': 'marchand_de_vin'}, {'id': 13669, 'synset': 'bread_sauce.n.01', 'name': 'bread_sauce'}, {'id': 13670, 'synset': 'plum_sauce.n.01', 'name': 'plum_sauce'}, {'id': 13671, 'synset': 'peach_sauce.n.01', 'name': 'peach_sauce'}, {'id': 13672, 'synset': 'apricot_sauce.n.01', 'name': 'apricot_sauce'}, {'id': 13673, 'synset': 'pesto.n.01', 'name': 'pesto'}, {'id': 13674, 'synset': 'ravigote.n.01', 'name': 'ravigote'}, {'id': 13675, 'synset': 'remoulade_sauce.n.01', 'name': 'remoulade_sauce'}, {'id': 13676, 'synset': 'dressing.n.01', 'name': 'dressing'}, {'id': 13677, 'synset': 'sauce_louis.n.01', 'name': 'sauce_Louis'}, {'id': 13678, 'synset': 'bleu_cheese_dressing.n.01', 'name': 'bleu_cheese_dressing'}, {'id': 13679, 'synset': 'blue_cheese_dressing.n.01', 'name': 'blue_cheese_dressing'}, {'id': 13680, 'synset': 'french_dressing.n.01', 'name': 'French_dressing'}, {'id': 13681, 'synset': 'lorenzo_dressing.n.01', 'name': 'Lorenzo_dressing'}, {'id': 13682, 'synset': 'anchovy_dressing.n.01', 'name': 'anchovy_dressing'}, {'id': 13683, 'synset': 'italian_dressing.n.01', 'name': 'Italian_dressing'}, {'id': 13684, 'synset': 'half-and-half_dressing.n.01', 'name': 'half-and-half_dressing'}, {'id': 13685, 'synset': 'mayonnaise.n.01', 'name': 'mayonnaise'}, {'id': 13686, 'synset': 'green_mayonnaise.n.01', 'name': 'green_mayonnaise'}, {'id': 13687, 'synset': 'aioli.n.01', 'name': 'aioli'}, {'id': 13688, 'synset': 'russian_dressing.n.01', 'name': 'Russian_dressing'}, {'id': 13689, 'synset': 'salad_cream.n.01', 'name': 'salad_cream'}, {'id': 13690, 'synset': 'thousand_island_dressing.n.01', 'name': 'Thousand_Island_dressing'}, {'id': 13691, 'synset': 'barbecue_sauce.n.01', 'name': 'barbecue_sauce'}, {'id': 13692, 'synset': 'hollandaise.n.01', 'name': 'hollandaise'}, {'id': 13693, 'synset': 'bearnaise.n.01', 'name': 'bearnaise'}, {'id': 13694, 'synset': 'bercy.n.01', 'name': 'Bercy'}, {'id': 13695, 'synset': 'bordelaise.n.01', 'name': 
'bordelaise'}, {'id': 13696, 'synset': 'bourguignon.n.01', 'name': 'bourguignon'}, {'id': 13697, 'synset': 'brown_sauce.n.02', 'name': 'brown_sauce'}, {'id': 13698, 'synset': 'espagnole.n.01', 'name': 'Espagnole'}, {'id': 13699, 'synset': 'chinese_brown_sauce.n.01', 'name': 'Chinese_brown_sauce'}, {'id': 13700, 'synset': 'blanc.n.01', 'name': 'blanc'}, {'id': 13701, 'synset': 'cheese_sauce.n.01', 'name': 'cheese_sauce'}, {'id': 13702, 'synset': 'chocolate_sauce.n.01', 'name': 'chocolate_sauce'}, {'id': 13703, 'synset': 'hot-fudge_sauce.n.01', 'name': 'hot-fudge_sauce'}, {'id': 13704, 'synset': 'cocktail_sauce.n.01', 'name': 'cocktail_sauce'}, {'id': 13705, 'synset': 'colbert.n.01', 'name': 'Colbert'}, {'id': 13706, 'synset': 'white_sauce.n.01', 'name': 'white_sauce'}, {'id': 13707, 'synset': 'cream_sauce.n.01', 'name': 'cream_sauce'}, {'id': 13708, 'synset': 'mornay_sauce.n.01', 'name': 'Mornay_sauce'}, {'id': 13709, 'synset': 'demiglace.n.01', 'name': 'demiglace'}, {'id': 13710, 'synset': 'gravy.n.02', 'name': 'gravy'}, {'id': 13711, 'synset': 'gravy.n.01', 'name': 'gravy'}, {'id': 13712, 'synset': 'spaghetti_sauce.n.01', 'name': 'spaghetti_sauce'}, {'id': 13713, 'synset': 'marinara.n.01', 'name': 'marinara'}, {'id': 13714, 'synset': 'mole.n.03', 'name': 'mole'}, {'id': 13715, 'synset': "hunter's_sauce.n.01", 'name': "hunter's_sauce"}, {'id': 13716, 'synset': 'mushroom_sauce.n.01', 'name': 'mushroom_sauce'}, {'id': 13717, 'synset': 'mustard_sauce.n.01', 'name': 'mustard_sauce'}, {'id': 13718, 'synset': 'nantua.n.01', 'name': 'Nantua'}, {'id': 13719, 'synset': 'hungarian_sauce.n.01', 'name': 'Hungarian_sauce'}, {'id': 13720, 'synset': 'pepper_sauce.n.01', 'name': 'pepper_sauce'}, {'id': 13721, 'synset': 'roux.n.01', 'name': 'roux'}, {'id': 13722, 'synset': 'smitane.n.01', 'name': 'Smitane'}, {'id': 13723, 'synset': 'soubise.n.01', 'name': 'Soubise'}, {'id': 13724, 'synset': 'lyonnaise_sauce.n.01', 'name': 'Lyonnaise_sauce'}, {'id': 13725, 'synset': 'veloute.n.01', 'name': 'veloute'}, {'id': 13726, 'synset': 'allemande.n.01', 'name': 'allemande'}, {'id': 13727, 'synset': 'caper_sauce.n.01', 'name': 'caper_sauce'}, {'id': 13728, 'synset': 'poulette.n.01', 'name': 'poulette'}, {'id': 13729, 'synset': 'curry_sauce.n.01', 'name': 'curry_sauce'}, {'id': 13730, 'synset': 'worcester_sauce.n.01', 'name': 'Worcester_sauce'}, {'id': 13731, 'synset': 'coconut_milk.n.01', 'name': 'coconut_milk'}, {'id': 13732, 'synset': 'egg_white.n.01', 'name': 'egg_white'}, {'id': 13733, 'synset': 'hard-boiled_egg.n.01', 'name': 'hard-boiled_egg'}, {'id': 13734, 'synset': 'easter_egg.n.02', 'name': 'Easter_egg'}, {'id': 13735, 'synset': 'easter_egg.n.01', 'name': 'Easter_egg'}, {'id': 13736, 'synset': 'chocolate_egg.n.01', 'name': 'chocolate_egg'}, {'id': 13737, 'synset': 'candy_egg.n.01', 'name': 'candy_egg'}, {'id': 13738, 'synset': 'poached_egg.n.01', 'name': 'poached_egg'}, {'id': 13739, 'synset': 'scrambled_eggs.n.01', 'name': 'scrambled_eggs'}, {'id': 13740, 'synset': 'deviled_egg.n.01', 'name': 'deviled_egg'}, {'id': 13741, 'synset': 'shirred_egg.n.01', 'name': 'shirred_egg'}, {'id': 13742, 'synset': 'firm_omelet.n.01', 'name': 'firm_omelet'}, {'id': 13743, 'synset': 'french_omelet.n.01', 'name': 'French_omelet'}, {'id': 13744, 'synset': 'fluffy_omelet.n.01', 'name': 'fluffy_omelet'}, {'id': 13745, 'synset': 'western_omelet.n.01', 'name': 'western_omelet'}, {'id': 13746, 'synset': 'souffle.n.01', 'name': 'souffle'}, {'id': 13747, 'synset': 'fried_egg.n.01', 'name': 'fried_egg'}, {'id': 13748, 'synset': 
'dairy_product.n.01', 'name': 'dairy_product'}, {'id': 13749, 'synset': 'milk.n.04', 'name': 'milk'}, {'id': 13750, 'synset': 'sour_milk.n.01', 'name': 'sour_milk'}, {'id': 13751, 'synset': 'formula.n.06', 'name': 'formula'}, {'id': 13752, 'synset': 'pasteurized_milk.n.01', 'name': 'pasteurized_milk'}, {'id': 13753, 'synset': "cows'_milk.n.01", 'name': "cows'_milk"}, {'id': 13754, 'synset': "yak's_milk.n.01", 'name': "yak's_milk"}, {'id': 13755, 'synset': "goats'_milk.n.01", 'name': "goats'_milk"}, {'id': 13756, 'synset': 'acidophilus_milk.n.01', 'name': 'acidophilus_milk'}, {'id': 13757, 'synset': 'raw_milk.n.01', 'name': 'raw_milk'}, {'id': 13758, 'synset': 'scalded_milk.n.01', 'name': 'scalded_milk'}, {'id': 13759, 'synset': 'homogenized_milk.n.01', 'name': 'homogenized_milk'}, {'id': 13760, 'synset': 'certified_milk.n.01', 'name': 'certified_milk'}, {'id': 13761, 'synset': 'powdered_milk.n.01', 'name': 'powdered_milk'}, {'id': 13762, 'synset': 'nonfat_dry_milk.n.01', 'name': 'nonfat_dry_milk'}, {'id': 13763, 'synset': 'evaporated_milk.n.01', 'name': 'evaporated_milk'}, {'id': 13764, 'synset': 'condensed_milk.n.01', 'name': 'condensed_milk'}, {'id': 13765, 'synset': 'skim_milk.n.01', 'name': 'skim_milk'}, {'id': 13766, 'synset': 'semi-skimmed_milk.n.01', 'name': 'semi-skimmed_milk'}, {'id': 13767, 'synset': 'whole_milk.n.01', 'name': 'whole_milk'}, {'id': 13768, 'synset': 'low-fat_milk.n.01', 'name': 'low-fat_milk'}, {'id': 13769, 'synset': 'buttermilk.n.01', 'name': 'buttermilk'}, {'id': 13770, 'synset': 'cream.n.02', 'name': 'cream'}, {'id': 13771, 'synset': 'clotted_cream.n.01', 'name': 'clotted_cream'}, {'id': 13772, 'synset': 'double_creme.n.01', 'name': 'double_creme'}, {'id': 13773, 'synset': 'half-and-half.n.01', 'name': 'half-and-half'}, {'id': 13774, 'synset': 'heavy_cream.n.01', 'name': 'heavy_cream'}, {'id': 13775, 'synset': 'light_cream.n.01', 'name': 'light_cream'}, {'id': 13776, 'synset': 'whipping_cream.n.01', 'name': 'whipping_cream'}, {'id': 13777, 'synset': 'clarified_butter.n.01', 'name': 'clarified_butter'}, {'id': 13778, 'synset': 'ghee.n.01', 'name': 'ghee'}, {'id': 13779, 'synset': 'brown_butter.n.01', 'name': 'brown_butter'}, {'id': 13780, 'synset': 'meuniere_butter.n.01', 'name': 'Meuniere_butter'}, {'id': 13781, 'synset': 'blueberry_yogurt.n.01', 'name': 'blueberry_yogurt'}, {'id': 13782, 'synset': 'raita.n.01', 'name': 'raita'}, {'id': 13783, 'synset': 'whey.n.02', 'name': 'whey'}, {'id': 13784, 'synset': 'curd.n.02', 'name': 'curd'}, {'id': 13785, 'synset': 'curd.n.01', 'name': 'curd'}, {'id': 13786, 'synset': 'clabber.n.01', 'name': 'clabber'}, {'id': 13787, 'synset': 'cheese.n.01', 'name': 'cheese'}, {'id': 13788, 'synset': 'paring.n.02', 'name': 'paring'}, {'id': 13789, 'synset': 'cream_cheese.n.01', 'name': 'cream_cheese'}, {'id': 13790, 'synset': 'double_cream.n.01', 'name': 'double_cream'}, {'id': 13791, 'synset': 'mascarpone.n.01', 'name': 'mascarpone'}, {'id': 13792, 'synset': 'triple_cream.n.01', 'name': 'triple_cream'}, {'id': 13793, 'synset': 'cottage_cheese.n.01', 'name': 'cottage_cheese'}, {'id': 13794, 'synset': 'process_cheese.n.01', 'name': 'process_cheese'}, {'id': 13795, 'synset': 'bleu.n.01', 'name': 'bleu'}, {'id': 13796, 'synset': 'stilton.n.01', 'name': 'Stilton'}, {'id': 13797, 'synset': 'roquefort.n.01', 'name': 'Roquefort'}, {'id': 13798, 'synset': 'gorgonzola.n.01', 'name': 'gorgonzola'}, {'id': 13799, 'synset': 'danish_blue.n.01', 'name': 'Danish_blue'}, {'id': 13800, 'synset': 'bavarian_blue.n.01', 'name': 'Bavarian_blue'}, {'id': 
13801, 'synset': 'brie.n.01', 'name': 'Brie'}, {'id': 13802, 'synset': 'brick_cheese.n.01', 'name': 'brick_cheese'}, {'id': 13803, 'synset': 'camembert.n.01', 'name': 'Camembert'}, {'id': 13804, 'synset': 'cheddar.n.02', 'name': 'cheddar'}, {'id': 13805, 'synset': 'rat_cheese.n.01', 'name': 'rat_cheese'}, {'id': 13806, 'synset': 'cheshire_cheese.n.01', 'name': 'Cheshire_cheese'}, {'id': 13807, 'synset': 'double_gloucester.n.01', 'name': 'double_Gloucester'}, {'id': 13808, 'synset': 'edam.n.01', 'name': 'Edam'}, {'id': 13809, 'synset': 'goat_cheese.n.01', 'name': 'goat_cheese'}, {'id': 13810, 'synset': 'gouda.n.01', 'name': 'Gouda'}, {'id': 13811, 'synset': 'grated_cheese.n.01', 'name': 'grated_cheese'}, {'id': 13812, 'synset': 'hand_cheese.n.01', 'name': 'hand_cheese'}, {'id': 13813, 'synset': 'liederkranz.n.01', 'name': 'Liederkranz'}, {'id': 13814, 'synset': 'limburger.n.01', 'name': 'Limburger'}, {'id': 13815, 'synset': 'mozzarella.n.01', 'name': 'mozzarella'}, {'id': 13816, 'synset': 'muenster.n.01', 'name': 'Muenster'}, {'id': 13817, 'synset': 'parmesan.n.01', 'name': 'Parmesan'}, {'id': 13818, 'synset': 'quark_cheese.n.01', 'name': 'quark_cheese'}, {'id': 13819, 'synset': 'ricotta.n.01', 'name': 'ricotta'}, {'id': 13820, 'synset': 'swiss_cheese.n.01', 'name': 'Swiss_cheese'}, {'id': 13821, 'synset': 'emmenthal.n.01', 'name': 'Emmenthal'}, {'id': 13822, 'synset': 'gruyere.n.01', 'name': 'Gruyere'}, {'id': 13823, 'synset': 'sapsago.n.01', 'name': 'sapsago'}, {'id': 13824, 'synset': 'velveeta.n.01', 'name': 'Velveeta'}, {'id': 13825, 'synset': 'nut_butter.n.01', 'name': 'nut_butter'}, {'id': 13826, 'synset': 'marshmallow_fluff.n.01', 'name': 'marshmallow_fluff'}, {'id': 13827, 'synset': 'onion_butter.n.01', 'name': 'onion_butter'}, {'id': 13828, 'synset': 'pimento_butter.n.01', 'name': 'pimento_butter'}, {'id': 13829, 'synset': 'shrimp_butter.n.01', 'name': 'shrimp_butter'}, {'id': 13830, 'synset': 'lobster_butter.n.01', 'name': 'lobster_butter'}, {'id': 13831, 'synset': 'yak_butter.n.01', 'name': 'yak_butter'}, {'id': 13832, 'synset': 'spread.n.05', 'name': 'spread'}, {'id': 13833, 'synset': 'cheese_spread.n.01', 'name': 'cheese_spread'}, {'id': 13834, 'synset': 'anchovy_butter.n.01', 'name': 'anchovy_butter'}, {'id': 13835, 'synset': 'fishpaste.n.01', 'name': 'fishpaste'}, {'id': 13836, 'synset': 'garlic_butter.n.01', 'name': 'garlic_butter'}, {'id': 13837, 'synset': 'miso.n.01', 'name': 'miso'}, {'id': 13838, 'synset': 'wasabi.n.02', 'name': 'wasabi'}, {'id': 13839, 'synset': 'snail_butter.n.01', 'name': 'snail_butter'}, {'id': 13840, 'synset': 'pate.n.01', 'name': 'pate'}, {'id': 13841, 'synset': 'duck_pate.n.01', 'name': 'duck_pate'}, {'id': 13842, 'synset': 'foie_gras.n.01', 'name': 'foie_gras'}, {'id': 13843, 'synset': 'tapenade.n.01', 'name': 'tapenade'}, {'id': 13844, 'synset': 'tahini.n.01', 'name': 'tahini'}, {'id': 13845, 'synset': 'sweetening.n.01', 'name': 'sweetening'}, {'id': 13846, 'synset': 'aspartame.n.01', 'name': 'aspartame'}, {'id': 13847, 'synset': 'saccharin.n.01', 'name': 'saccharin'}, {'id': 13848, 'synset': 'sugar.n.01', 'name': 'sugar'}, {'id': 13849, 'synset': 'syrup.n.01', 'name': 'syrup'}, {'id': 13850, 'synset': 'sugar_syrup.n.01', 'name': 'sugar_syrup'}, {'id': 13851, 'synset': 'molasses.n.01', 'name': 'molasses'}, {'id': 13852, 'synset': 'sorghum.n.03', 'name': 'sorghum'}, {'id': 13853, 'synset': 'treacle.n.01', 'name': 'treacle'}, {'id': 13854, 'synset': 'grenadine.n.01', 'name': 'grenadine'}, {'id': 13855, 'synset': 'maple_syrup.n.01', 'name': 
'maple_syrup'}, {'id': 13856, 'synset': 'corn_syrup.n.01', 'name': 'corn_syrup'}, {'id': 13857, 'synset': 'miraculous_food.n.01', 'name': 'miraculous_food'}, {'id': 13858, 'synset': 'dough.n.01', 'name': 'dough'}, {'id': 13859, 'synset': 'bread_dough.n.01', 'name': 'bread_dough'}, {'id': 13860, 'synset': 'pancake_batter.n.01', 'name': 'pancake_batter'}, {'id': 13861, 'synset': 'fritter_batter.n.01', 'name': 'fritter_batter'}, {'id': 13862, 'synset': 'coq_au_vin.n.01', 'name': 'coq_au_vin'}, {'id': 13863, 'synset': 'chicken_provencale.n.01', 'name': 'chicken_provencale'}, {'id': 13864, 'synset': 'chicken_and_rice.n.01', 'name': 'chicken_and_rice'}, {'id': 13865, 'synset': 'moo_goo_gai_pan.n.01', 'name': 'moo_goo_gai_pan'}, {'id': 13866, 'synset': 'arroz_con_pollo.n.01', 'name': 'arroz_con_pollo'}, {'id': 13867, 'synset': 'bacon_and_eggs.n.02', 'name': 'bacon_and_eggs'}, {'id': 13868, 'synset': 'barbecued_spareribs.n.01', 'name': 'barbecued_spareribs'}, {'id': 13869, 'synset': 'beef_bourguignonne.n.01', 'name': 'beef_Bourguignonne'}, {'id': 13870, 'synset': 'beef_wellington.n.01', 'name': 'beef_Wellington'}, {'id': 13871, 'synset': 'bitok.n.01', 'name': 'bitok'}, {'id': 13872, 'synset': 'boiled_dinner.n.01', 'name': 'boiled_dinner'}, {'id': 13873, 'synset': 'boston_baked_beans.n.01', 'name': 'Boston_baked_beans'}, {'id': 13874, 'synset': 'bubble_and_squeak.n.01', 'name': 'bubble_and_squeak'}, {'id': 13875, 'synset': 'pasta.n.01', 'name': 'pasta'}, {'id': 13876, 'synset': 'cannelloni.n.01', 'name': 'cannelloni'}, {'id': 13877, 'synset': 'carbonnade_flamande.n.01', 'name': 'carbonnade_flamande'}, {'id': 13878, 'synset': 'cheese_souffle.n.01', 'name': 'cheese_souffle'}, {'id': 13879, 'synset': 'chicken_marengo.n.01', 'name': 'chicken_Marengo'}, {'id': 13880, 'synset': 'chicken_cordon_bleu.n.01', 'name': 'chicken_cordon_bleu'}, {'id': 13881, 'synset': 'maryland_chicken.n.01', 'name': 'Maryland_chicken'}, {'id': 13882, 'synset': 'chicken_paprika.n.01', 'name': 'chicken_paprika'}, {'id': 13883, 'synset': 'chicken_tetrazzini.n.01', 'name': 'chicken_Tetrazzini'}, {'id': 13884, 'synset': 'tetrazzini.n.01', 'name': 'Tetrazzini'}, {'id': 13885, 'synset': 'chicken_kiev.n.01', 'name': 'chicken_Kiev'}, {'id': 13886, 'synset': 'chili.n.01', 'name': 'chili'}, {'id': 13887, 'synset': 'chili_dog.n.01', 'name': 'chili_dog'}, {'id': 13888, 'synset': 'chop_suey.n.01', 'name': 'chop_suey'}, {'id': 13889, 'synset': 'chow_mein.n.01', 'name': 'chow_mein'}, {'id': 13890, 'synset': 'codfish_ball.n.01', 'name': 'codfish_ball'}, {'id': 13891, 'synset': 'coquille.n.01', 'name': 'coquille'}, {'id': 13892, 'synset': 'coquilles_saint-jacques.n.01', 'name': 'coquilles_Saint-Jacques'}, {'id': 13893, 'synset': 'croquette.n.01', 'name': 'croquette'}, {'id': 13894, 'synset': 'cottage_pie.n.01', 'name': 'cottage_pie'}, {'id': 13895, 'synset': 'rissole.n.01', 'name': 'rissole'}, {'id': 13896, 'synset': 'dolmas.n.01', 'name': 'dolmas'}, {'id': 13897, 'synset': 'egg_foo_yong.n.01', 'name': 'egg_foo_yong'}, {'id': 13898, 'synset': 'eggs_benedict.n.01', 'name': 'eggs_Benedict'}, {'id': 13899, 'synset': 'enchilada.n.01', 'name': 'enchilada'}, {'id': 13900, 'synset': 'falafel.n.01', 'name': 'falafel'}, {'id': 13901, 'synset': 'fish_and_chips.n.01', 'name': 'fish_and_chips'}, {'id': 13902, 'synset': 'fondue.n.02', 'name': 'fondue'}, {'id': 13903, 'synset': 'cheese_fondue.n.01', 'name': 'cheese_fondue'}, {'id': 13904, 'synset': 'chocolate_fondue.n.01', 'name': 'chocolate_fondue'}, {'id': 13905, 'synset': 'fondue.n.01', 'name': 'fondue'}, 
{'id': 13906, 'synset': 'beef_fondue.n.01', 'name': 'beef_fondue'}, {'id': 13907, 'synset': 'fried_rice.n.01', 'name': 'fried_rice'}, {'id': 13908, 'synset': 'frittata.n.01', 'name': 'frittata'}, {'id': 13909, 'synset': 'frog_legs.n.01', 'name': 'frog_legs'}, {'id': 13910, 'synset': 'galantine.n.01', 'name': 'galantine'}, {'id': 13911, 'synset': 'gefilte_fish.n.01', 'name': 'gefilte_fish'}, {'id': 13912, 'synset': 'haggis.n.01', 'name': 'haggis'}, {'id': 13913, 'synset': 'ham_and_eggs.n.01', 'name': 'ham_and_eggs'}, {'id': 13914, 'synset': 'hash.n.01', 'name': 'hash'}, {'id': 13915, 'synset': 'corned_beef_hash.n.01', 'name': 'corned_beef_hash'}, {'id': 13916, 'synset': 'jambalaya.n.01', 'name': 'jambalaya'}, {'id': 13917, 'synset': 'kabob.n.01', 'name': 'kabob'}, {'id': 13918, 'synset': 'kedgeree.n.01', 'name': 'kedgeree'}, {'id': 13919, 'synset': 'souvlaki.n.01', 'name': 'souvlaki'}, {'id': 13920, 'synset': 'seafood_newburg.n.01', 'name': 'seafood_Newburg'}, {'id': 13921, 'synset': 'lobster_newburg.n.01', 'name': 'lobster_Newburg'}, {'id': 13922, 'synset': 'shrimp_newburg.n.01', 'name': 'shrimp_Newburg'}, {'id': 13923, 'synset': 'newburg_sauce.n.01', 'name': 'Newburg_sauce'}, {'id': 13924, 'synset': 'lobster_thermidor.n.01', 'name': 'lobster_thermidor'}, {'id': 13925, 'synset': 'lutefisk.n.01', 'name': 'lutefisk'}, {'id': 13926, 'synset': 'macaroni_and_cheese.n.01', 'name': 'macaroni_and_cheese'}, {'id': 13927, 'synset': 'macedoine.n.01', 'name': 'macedoine'}, {'id': 13928, 'synset': 'porcupine_ball.n.01', 'name': 'porcupine_ball'}, {'id': 13929, 'synset': 'swedish_meatball.n.01', 'name': 'Swedish_meatball'}, {'id': 13930, 'synset': 'meat_loaf.n.01', 'name': 'meat_loaf'}, {'id': 13931, 'synset': 'moussaka.n.01', 'name': 'moussaka'}, {'id': 13932, 'synset': 'osso_buco.n.01', 'name': 'osso_buco'}, {'id': 13933, 'synset': 'marrow.n.03', 'name': 'marrow'}, {'id': 13934, 'synset': 'pheasant_under_glass.n.01', 'name': 'pheasant_under_glass'}, {'id': 13935, 'synset': 'pigs_in_blankets.n.01', 'name': 'pigs_in_blankets'}, {'id': 13936, 'synset': 'pilaf.n.01', 'name': 'pilaf'}, {'id': 13937, 'synset': 'bulgur_pilaf.n.01', 'name': 'bulgur_pilaf'}, {'id': 13938, 'synset': 'sausage_pizza.n.01', 'name': 'sausage_pizza'}, {'id': 13939, 'synset': 'pepperoni_pizza.n.01', 'name': 'pepperoni_pizza'}, {'id': 13940, 'synset': 'cheese_pizza.n.01', 'name': 'cheese_pizza'}, {'id': 13941, 'synset': 'anchovy_pizza.n.01', 'name': 'anchovy_pizza'}, {'id': 13942, 'synset': 'sicilian_pizza.n.01', 'name': 'Sicilian_pizza'}, {'id': 13943, 'synset': 'poi.n.01', 'name': 'poi'}, {'id': 13944, 'synset': 'pork_and_beans.n.01', 'name': 'pork_and_beans'}, {'id': 13945, 'synset': 'porridge.n.01', 'name': 'porridge'}, {'id': 13946, 'synset': 'oatmeal.n.01', 'name': 'oatmeal'}, {'id': 13947, 'synset': 'loblolly.n.01', 'name': 'loblolly'}, {'id': 13948, 'synset': 'potpie.n.01', 'name': 'potpie'}, {'id': 13949, 'synset': 'rijsttaffel.n.01', 'name': 'rijsttaffel'}, {'id': 13950, 'synset': 'risotto.n.01', 'name': 'risotto'}, {'id': 13951, 'synset': 'roulade.n.01', 'name': 'roulade'}, {'id': 13952, 'synset': 'fish_loaf.n.01', 'name': 'fish_loaf'}, {'id': 13953, 'synset': 'salmon_loaf.n.01', 'name': 'salmon_loaf'}, {'id': 13954, 'synset': 'salisbury_steak.n.01', 'name': 'Salisbury_steak'}, {'id': 13955, 'synset': 'sauerbraten.n.01', 'name': 'sauerbraten'}, {'id': 13956, 'synset': 'sauerkraut.n.01', 'name': 'sauerkraut'}, {'id': 13957, 'synset': 'scallopine.n.01', 'name': 'scallopine'}, {'id': 13958, 'synset': 'veal_scallopini.n.01', 
'name': 'veal_scallopini'}, {'id': 13959, 'synset': 'scampi.n.01', 'name': 'scampi'}, {'id': 13960, 'synset': 'scotch_egg.n.01', 'name': 'Scotch_egg'}, {'id': 13961, 'synset': 'scotch_woodcock.n.01', 'name': 'Scotch_woodcock'}, {'id': 13962, 'synset': 'scrapple.n.01', 'name': 'scrapple'}, {'id': 13963, 'synset': 'spaghetti_and_meatballs.n.01', 'name': 'spaghetti_and_meatballs'}, {'id': 13964, 'synset': 'spanish_rice.n.01', 'name': 'Spanish_rice'}, {'id': 13965, 'synset': 'steak_tartare.n.01', 'name': 'steak_tartare'}, {'id': 13966, 'synset': 'pepper_steak.n.02', 'name': 'pepper_steak'}, {'id': 13967, 'synset': 'steak_au_poivre.n.01', 'name': 'steak_au_poivre'}, {'id': 13968, 'synset': 'beef_stroganoff.n.01', 'name': 'beef_Stroganoff'}, {'id': 13969, 'synset': 'stuffed_cabbage.n.01', 'name': 'stuffed_cabbage'}, {'id': 13970, 'synset': 'kishke.n.01', 'name': 'kishke'}, {'id': 13971, 'synset': 'stuffed_peppers.n.01', 'name': 'stuffed_peppers'}, {'id': 13972, 'synset': 'stuffed_tomato.n.02', 'name': 'stuffed_tomato'}, {'id': 13973, 'synset': 'stuffed_tomato.n.01', 'name': 'stuffed_tomato'}, {'id': 13974, 'synset': 'succotash.n.01', 'name': 'succotash'}, {'id': 13975, 'synset': 'sukiyaki.n.01', 'name': 'sukiyaki'}, {'id': 13976, 'synset': 'sashimi.n.01', 'name': 'sashimi'}, {'id': 13977, 'synset': 'swiss_steak.n.01', 'name': 'Swiss_steak'}, {'id': 13978, 'synset': 'tamale.n.02', 'name': 'tamale'}, {'id': 13979, 'synset': 'tamale_pie.n.01', 'name': 'tamale_pie'}, {'id': 13980, 'synset': 'tempura.n.01', 'name': 'tempura'}, {'id': 13981, 'synset': 'teriyaki.n.01', 'name': 'teriyaki'}, {'id': 13982, 'synset': 'terrine.n.01', 'name': 'terrine'}, {'id': 13983, 'synset': 'welsh_rarebit.n.01', 'name': 'Welsh_rarebit'}, {'id': 13984, 'synset': 'schnitzel.n.01', 'name': 'schnitzel'}, {'id': 13985, 'synset': 'chicken_taco.n.01', 'name': 'chicken_taco'}, {'id': 13986, 'synset': 'beef_burrito.n.01', 'name': 'beef_burrito'}, {'id': 13987, 'synset': 'tostada.n.01', 'name': 'tostada'}, {'id': 13988, 'synset': 'bean_tostada.n.01', 'name': 'bean_tostada'}, {'id': 13989, 'synset': 'refried_beans.n.01', 'name': 'refried_beans'}, {'id': 13990, 'synset': 'beverage.n.01', 'name': 'beverage'}, {'id': 13991, 'synset': 'wish-wash.n.01', 'name': 'wish-wash'}, {'id': 13992, 'synset': 'concoction.n.01', 'name': 'concoction'}, {'id': 13993, 'synset': 'mix.n.01', 'name': 'mix'}, {'id': 13994, 'synset': 'filling.n.03', 'name': 'filling'}, {'id': 13995, 'synset': 'lekvar.n.01', 'name': 'lekvar'}, {'id': 13996, 'synset': 'potion.n.01', 'name': 'potion'}, {'id': 13997, 'synset': 'elixir.n.03', 'name': 'elixir'}, {'id': 13998, 'synset': 'elixir_of_life.n.01', 'name': 'elixir_of_life'}, {'id': 13999, 'synset': 'philter.n.01', 'name': 'philter'}, {'id': 14000, 'synset': 'proof_spirit.n.01', 'name': 'proof_spirit'}, {'id': 14001, 'synset': 'home_brew.n.01', 'name': 'home_brew'}, {'id': 14002, 'synset': 'hooch.n.01', 'name': 'hooch'}, {'id': 14003, 'synset': 'kava.n.01', 'name': 'kava'}, {'id': 14004, 'synset': 'aperitif.n.01', 'name': 'aperitif'}, {'id': 14005, 'synset': 'brew.n.01', 'name': 'brew'}, {'id': 14006, 'synset': 'beer.n.01', 'name': 'beer'}, {'id': 14007, 'synset': 'draft_beer.n.01', 'name': 'draft_beer'}, {'id': 14008, 'synset': 'suds.n.02', 'name': 'suds'}, {'id': 14009, 'synset': 'munich_beer.n.01', 'name': 'Munich_beer'}, {'id': 14010, 'synset': 'bock.n.01', 'name': 'bock'}, {'id': 14011, 'synset': 'lager.n.02', 'name': 'lager'}, {'id': 14012, 'synset': 'light_beer.n.01', 'name': 'light_beer'}, {'id': 14013, 
'synset': 'oktoberfest.n.01', 'name': 'Oktoberfest'}, {'id': 14014, 'synset': 'pilsner.n.01', 'name': 'Pilsner'}, {'id': 14015, 'synset': 'shebeen.n.01', 'name': 'shebeen'}, {'id': 14016, 'synset': 'weissbier.n.01', 'name': 'Weissbier'}, {'id': 14017, 'synset': 'weizenbock.n.01', 'name': 'Weizenbock'}, {'id': 14018, 'synset': 'malt.n.03', 'name': 'malt'}, {'id': 14019, 'synset': 'wort.n.02', 'name': 'wort'}, {'id': 14020, 'synset': 'malt.n.02', 'name': 'malt'}, {'id': 14021, 'synset': 'ale.n.01', 'name': 'ale'}, {'id': 14022, 'synset': 'bitter.n.01', 'name': 'bitter'}, {'id': 14023, 'synset': 'burton.n.03', 'name': 'Burton'}, {'id': 14024, 'synset': 'pale_ale.n.01', 'name': 'pale_ale'}, {'id': 14025, 'synset': 'porter.n.07', 'name': 'porter'}, {'id': 14026, 'synset': 'stout.n.01', 'name': 'stout'}, {'id': 14027, 'synset': 'guinness.n.02', 'name': 'Guinness'}, {'id': 14028, 'synset': 'kvass.n.01', 'name': 'kvass'}, {'id': 14029, 'synset': 'mead.n.03', 'name': 'mead'}, {'id': 14030, 'synset': 'metheglin.n.01', 'name': 'metheglin'}, {'id': 14031, 'synset': 'hydromel.n.01', 'name': 'hydromel'}, {'id': 14032, 'synset': 'oenomel.n.01', 'name': 'oenomel'}, {'id': 14033, 'synset': 'near_beer.n.01', 'name': 'near_beer'}, {'id': 14034, 'synset': 'ginger_beer.n.01', 'name': 'ginger_beer'}, {'id': 14035, 'synset': 'sake.n.02', 'name': 'sake'}, {'id': 14036, 'synset': 'wine.n.01', 'name': 'wine'}, {'id': 14037, 'synset': 'vintage.n.01', 'name': 'vintage'}, {'id': 14038, 'synset': 'red_wine.n.01', 'name': 'red_wine'}, {'id': 14039, 'synset': 'white_wine.n.01', 'name': 'white_wine'}, {'id': 14040, 'synset': 'blush_wine.n.01', 'name': 'blush_wine'}, {'id': 14041, 'synset': 'altar_wine.n.01', 'name': 'altar_wine'}, {'id': 14042, 'synset': 'sparkling_wine.n.01', 'name': 'sparkling_wine'}, {'id': 14043, 'synset': 'champagne.n.01', 'name': 'champagne'}, {'id': 14044, 'synset': 'cold_duck.n.01', 'name': 'cold_duck'}, {'id': 14045, 'synset': 'burgundy.n.02', 'name': 'Burgundy'}, {'id': 14046, 'synset': 'beaujolais.n.01', 'name': 'Beaujolais'}, {'id': 14047, 'synset': 'medoc.n.01', 'name': 'Medoc'}, {'id': 14048, 'synset': 'canary_wine.n.01', 'name': 'Canary_wine'}, {'id': 14049, 'synset': 'chablis.n.02', 'name': 'Chablis'}, {'id': 14050, 'synset': 'montrachet.n.01', 'name': 'Montrachet'}, {'id': 14051, 'synset': 'chardonnay.n.02', 'name': 'Chardonnay'}, {'id': 14052, 'synset': 'pinot_noir.n.02', 'name': 'Pinot_noir'}, {'id': 14053, 'synset': 'pinot_blanc.n.02', 'name': 'Pinot_blanc'}, {'id': 14054, 'synset': 'bordeaux.n.02', 'name': 'Bordeaux'}, {'id': 14055, 'synset': 'claret.n.02', 'name': 'claret'}, {'id': 14056, 'synset': 'chianti.n.01', 'name': 'Chianti'}, {'id': 14057, 'synset': 'cabernet.n.01', 'name': 'Cabernet'}, {'id': 14058, 'synset': 'merlot.n.02', 'name': 'Merlot'}, {'id': 14059, 'synset': 'sauvignon_blanc.n.02', 'name': 'Sauvignon_blanc'}, {'id': 14060, 'synset': 'california_wine.n.01', 'name': 'California_wine'}, {'id': 14061, 'synset': 'cotes_de_provence.n.01', 'name': 'Cotes_de_Provence'}, {'id': 14062, 'synset': 'dessert_wine.n.01', 'name': 'dessert_wine'}, {'id': 14063, 'synset': 'dubonnet.n.01', 'name': 'Dubonnet'}, {'id': 14064, 'synset': 'jug_wine.n.01', 'name': 'jug_wine'}, {'id': 14065, 'synset': 'macon.n.02', 'name': 'macon'}, {'id': 14066, 'synset': 'moselle.n.01', 'name': 'Moselle'}, {'id': 14067, 'synset': 'muscadet.n.02', 'name': 'Muscadet'}, {'id': 14068, 'synset': 'plonk.n.01', 'name': 'plonk'}, {'id': 14069, 'synset': 'retsina.n.01', 'name': 'retsina'}, {'id': 14070, 'synset': 
'rhine_wine.n.01', 'name': 'Rhine_wine'}, {'id': 14071, 'synset': 'riesling.n.02', 'name': 'Riesling'}, {'id': 14072, 'synset': 'liebfraumilch.n.01', 'name': 'liebfraumilch'}, {'id': 14073, 'synset': 'rhone_wine.n.01', 'name': 'Rhone_wine'}, {'id': 14074, 'synset': 'rioja.n.01', 'name': 'Rioja'}, {'id': 14075, 'synset': 'sack.n.04', 'name': 'sack'}, {'id': 14076, 'synset': 'saint_emilion.n.01', 'name': 'Saint_Emilion'}, {'id': 14077, 'synset': 'soave.n.01', 'name': 'Soave'}, {'id': 14078, 'synset': 'zinfandel.n.02', 'name': 'zinfandel'}, {'id': 14079, 'synset': 'sauterne.n.01', 'name': 'Sauterne'}, {'id': 14080, 'synset': 'straw_wine.n.01', 'name': 'straw_wine'}, {'id': 14081, 'synset': 'table_wine.n.01', 'name': 'table_wine'}, {'id': 14082, 'synset': 'tokay.n.01', 'name': 'Tokay'}, {'id': 14083, 'synset': 'vin_ordinaire.n.01', 'name': 'vin_ordinaire'}, {'id': 14084, 'synset': 'vermouth.n.01', 'name': 'vermouth'}, {'id': 14085, 'synset': 'sweet_vermouth.n.01', 'name': 'sweet_vermouth'}, {'id': 14086, 'synset': 'dry_vermouth.n.01', 'name': 'dry_vermouth'}, {'id': 14087, 'synset': 'chenin_blanc.n.02', 'name': 'Chenin_blanc'}, {'id': 14088, 'synset': 'verdicchio.n.02', 'name': 'Verdicchio'}, {'id': 14089, 'synset': 'vouvray.n.01', 'name': 'Vouvray'}, {'id': 14090, 'synset': 'yquem.n.01', 'name': 'Yquem'}, {'id': 14091, 'synset': 'generic.n.01', 'name': 'generic'}, {'id': 14092, 'synset': 'varietal.n.01', 'name': 'varietal'}, {'id': 14093, 'synset': 'fortified_wine.n.01', 'name': 'fortified_wine'}, {'id': 14094, 'synset': 'madeira.n.03', 'name': 'Madeira'}, {'id': 14095, 'synset': 'malmsey.n.01', 'name': 'malmsey'}, {'id': 14096, 'synset': 'port.n.02', 'name': 'port'}, {'id': 14097, 'synset': 'sherry.n.01', 'name': 'sherry'}, {'id': 14098, 'synset': 'marsala.n.01', 'name': 'Marsala'}, {'id': 14099, 'synset': 'muscat.n.03', 'name': 'muscat'}, {'id': 14100, 'synset': 'neutral_spirits.n.01', 'name': 'neutral_spirits'}, {'id': 14101, 'synset': 'aqua_vitae.n.01', 'name': 'aqua_vitae'}, {'id': 14102, 'synset': 'eau_de_vie.n.01', 'name': 'eau_de_vie'}, {'id': 14103, 'synset': 'moonshine.n.02', 'name': 'moonshine'}, {'id': 14104, 'synset': 'bathtub_gin.n.01', 'name': 'bathtub_gin'}, {'id': 14105, 'synset': 'aquavit.n.01', 'name': 'aquavit'}, {'id': 14106, 'synset': 'arrack.n.01', 'name': 'arrack'}, {'id': 14107, 'synset': 'bitters.n.01', 'name': 'bitters'}, {'id': 14108, 'synset': 'brandy.n.01', 'name': 'brandy'}, {'id': 14109, 'synset': 'applejack.n.01', 'name': 'applejack'}, {'id': 14110, 'synset': 'calvados.n.01', 'name': 'Calvados'}, {'id': 14111, 'synset': 'armagnac.n.01', 'name': 'Armagnac'}, {'id': 14112, 'synset': 'cognac.n.01', 'name': 'Cognac'}, {'id': 14113, 'synset': 'grappa.n.01', 'name': 'grappa'}, {'id': 14114, 'synset': 'kirsch.n.01', 'name': 'kirsch'}, {'id': 14115, 'synset': 'slivovitz.n.01', 'name': 'slivovitz'}, {'id': 14116, 'synset': 'gin.n.01', 'name': 'gin'}, {'id': 14117, 'synset': 'sloe_gin.n.01', 'name': 'sloe_gin'}, {'id': 14118, 'synset': 'geneva.n.02', 'name': 'geneva'}, {'id': 14119, 'synset': 'grog.n.01', 'name': 'grog'}, {'id': 14120, 'synset': 'ouzo.n.01', 'name': 'ouzo'}, {'id': 14121, 'synset': 'rum.n.01', 'name': 'rum'}, {'id': 14122, 'synset': 'demerara.n.04', 'name': 'demerara'}, {'id': 14123, 'synset': 'jamaica_rum.n.01', 'name': 'Jamaica_rum'}, {'id': 14124, 'synset': 'schnapps.n.01', 'name': 'schnapps'}, {'id': 14125, 'synset': 'pulque.n.01', 'name': 'pulque'}, {'id': 14126, 'synset': 'mescal.n.02', 'name': 'mescal'}, {'id': 14127, 'synset': 'whiskey.n.01', 
'name': 'whiskey'}, {'id': 14128, 'synset': 'blended_whiskey.n.01', 'name': 'blended_whiskey'}, {'id': 14129, 'synset': 'bourbon.n.02', 'name': 'bourbon'}, {'id': 14130, 'synset': 'corn_whiskey.n.01', 'name': 'corn_whiskey'}, {'id': 14131, 'synset': 'firewater.n.01', 'name': 'firewater'}, {'id': 14132, 'synset': 'irish.n.02', 'name': 'Irish'}, {'id': 14133, 'synset': 'poteen.n.01', 'name': 'poteen'}, {'id': 14134, 'synset': 'rye.n.03', 'name': 'rye'}, {'id': 14135, 'synset': 'scotch.n.02', 'name': 'Scotch'}, {'id': 14136, 'synset': 'sour_mash.n.02', 'name': 'sour_mash'}, {'id': 14137, 'synset': 'liqueur.n.01', 'name': 'liqueur'}, {'id': 14138, 'synset': 'absinth.n.01', 'name': 'absinth'}, {'id': 14139, 'synset': 'amaretto.n.01', 'name': 'amaretto'}, {'id': 14140, 'synset': 'anisette.n.01', 'name': 'anisette'}, {'id': 14141, 'synset': 'benedictine.n.02', 'name': 'benedictine'}, {'id': 14142, 'synset': 'chartreuse.n.01', 'name': 'Chartreuse'}, {'id': 14143, 'synset': 'coffee_liqueur.n.01', 'name': 'coffee_liqueur'}, {'id': 14144, 'synset': 'creme_de_cacao.n.01', 'name': 'creme_de_cacao'}, {'id': 14145, 'synset': 'creme_de_menthe.n.01', 'name': 'creme_de_menthe'}, {'id': 14146, 'synset': 'creme_de_fraise.n.01', 'name': 'creme_de_fraise'}, {'id': 14147, 'synset': 'drambuie.n.01', 'name': 'Drambuie'}, {'id': 14148, 'synset': 'galliano.n.01', 'name': 'Galliano'}, {'id': 14149, 'synset': 'orange_liqueur.n.01', 'name': 'orange_liqueur'}, {'id': 14150, 'synset': 'curacao.n.02', 'name': 'curacao'}, {'id': 14151, 'synset': 'triple_sec.n.01', 'name': 'triple_sec'}, {'id': 14152, 'synset': 'grand_marnier.n.01', 'name': 'Grand_Marnier'}, {'id': 14153, 'synset': 'kummel.n.01', 'name': 'kummel'}, {'id': 14154, 'synset': 'maraschino.n.01', 'name': 'maraschino'}, {'id': 14155, 'synset': 'pastis.n.01', 'name': 'pastis'}, {'id': 14156, 'synset': 'pernod.n.01', 'name': 'Pernod'}, {'id': 14157, 'synset': 'pousse-cafe.n.01', 'name': 'pousse-cafe'}, {'id': 14158, 'synset': 'kahlua.n.01', 'name': 'Kahlua'}, {'id': 14159, 'synset': 'ratafia.n.01', 'name': 'ratafia'}, {'id': 14160, 'synset': 'sambuca.n.01', 'name': 'sambuca'}, {'id': 14161, 'synset': 'mixed_drink.n.01', 'name': 'mixed_drink'}, {'id': 14162, 'synset': 'cocktail.n.01', 'name': 'cocktail'}, {'id': 14163, 'synset': 'dom_pedro.n.01', 'name': 'Dom_Pedro'}, {'id': 14164, 'synset': 'highball.n.01', 'name': 'highball'}, {'id': 14165, 'synset': 'mixer.n.02', 'name': 'mixer'}, {'id': 14166, 'synset': 'bishop.n.02', 'name': 'bishop'}, {'id': 14167, 'synset': 'bloody_mary.n.02', 'name': 'Bloody_Mary'}, {'id': 14168, 'synset': 'virgin_mary.n.02', 'name': 'Virgin_Mary'}, {'id': 14169, 'synset': 'bullshot.n.01', 'name': 'bullshot'}, {'id': 14170, 'synset': 'cobbler.n.02', 'name': 'cobbler'}, {'id': 14171, 'synset': 'collins.n.02', 'name': 'collins'}, {'id': 14172, 'synset': 'cooler.n.02', 'name': 'cooler'}, {'id': 14173, 'synset': 'refresher.n.02', 'name': 'refresher'}, {'id': 14174, 'synset': 'daiquiri.n.01', 'name': 'daiquiri'}, {'id': 14175, 'synset': 'strawberry_daiquiri.n.01', 'name': 'strawberry_daiquiri'}, {'id': 14176, 'synset': 'nada_daiquiri.n.01', 'name': 'NADA_daiquiri'}, {'id': 14177, 'synset': 'spritzer.n.01', 'name': 'spritzer'}, {'id': 14178, 'synset': 'flip.n.02', 'name': 'flip'}, {'id': 14179, 'synset': 'gimlet.n.01', 'name': 'gimlet'}, {'id': 14180, 'synset': 'gin_and_tonic.n.01', 'name': 'gin_and_tonic'}, {'id': 14181, 'synset': 'grasshopper.n.02', 'name': 'grasshopper'}, {'id': 14182, 'synset': 'harvey_wallbanger.n.01', 'name': 
'Harvey_Wallbanger'}, {'id': 14183, 'synset': 'julep.n.01', 'name': 'julep'}, {'id': 14184, 'synset': 'manhattan.n.02', 'name': 'manhattan'}, {'id': 14185, 'synset': 'rob_roy.n.02', 'name': 'Rob_Roy'}, {'id': 14186, 'synset': 'margarita.n.01', 'name': 'margarita'}, {'id': 14187, 'synset': 'gin_and_it.n.01', 'name': 'gin_and_it'}, {'id': 14188, 'synset': 'vodka_martini.n.01', 'name': 'vodka_martini'}, {'id': 14189, 'synset': 'old_fashioned.n.01', 'name': 'old_fashioned'}, {'id': 14190, 'synset': 'pink_lady.n.01', 'name': 'pink_lady'}, {'id': 14191, 'synset': 'sazerac.n.01', 'name': 'Sazerac'}, {'id': 14192, 'synset': 'screwdriver.n.02', 'name': 'screwdriver'}, {'id': 14193, 'synset': 'sidecar.n.01', 'name': 'sidecar'}, {'id': 14194, 'synset': 'scotch_and_soda.n.01', 'name': 'Scotch_and_soda'}, {'id': 14195, 'synset': 'sling.n.01', 'name': 'sling'}, {'id': 14196, 'synset': 'brandy_sling.n.01', 'name': 'brandy_sling'}, {'id': 14197, 'synset': 'gin_sling.n.01', 'name': 'gin_sling'}, {'id': 14198, 'synset': 'rum_sling.n.01', 'name': 'rum_sling'}, {'id': 14199, 'synset': 'sour.n.01', 'name': 'sour'}, {'id': 14200, 'synset': 'whiskey_sour.n.01', 'name': 'whiskey_sour'}, {'id': 14201, 'synset': 'stinger.n.01', 'name': 'stinger'}, {'id': 14202, 'synset': 'swizzle.n.01', 'name': 'swizzle'}, {'id': 14203, 'synset': 'hot_toddy.n.01', 'name': 'hot_toddy'}, {'id': 14204, 'synset': 'zombie.n.05', 'name': 'zombie'}, {'id': 14205, 'synset': 'fizz.n.01', 'name': 'fizz'}, {'id': 14206, 'synset': 'irish_coffee.n.01', 'name': 'Irish_coffee'}, {'id': 14207, 'synset': 'cafe_au_lait.n.01', 'name': 'cafe_au_lait'}, {'id': 14208, 'synset': 'cafe_noir.n.01', 'name': 'cafe_noir'}, {'id': 14209, 'synset': 'decaffeinated_coffee.n.01', 'name': 'decaffeinated_coffee'}, {'id': 14210, 'synset': 'drip_coffee.n.01', 'name': 'drip_coffee'}, {'id': 14211, 'synset': 'espresso.n.01', 'name': 'espresso'}, {'id': 14212, 'synset': 'caffe_latte.n.01', 'name': 'caffe_latte'}, {'id': 14213, 'synset': 'iced_coffee.n.01', 'name': 'iced_coffee'}, {'id': 14214, 'synset': 'instant_coffee.n.01', 'name': 'instant_coffee'}, {'id': 14215, 'synset': 'mocha.n.03', 'name': 'mocha'}, {'id': 14216, 'synset': 'mocha.n.02', 'name': 'mocha'}, {'id': 14217, 'synset': 'cassareep.n.01', 'name': 'cassareep'}, {'id': 14218, 'synset': 'turkish_coffee.n.01', 'name': 'Turkish_coffee'}, {'id': 14219, 'synset': 'hard_cider.n.01', 'name': 'hard_cider'}, {'id': 14220, 'synset': 'scrumpy.n.01', 'name': 'scrumpy'}, {'id': 14221, 'synset': 'sweet_cider.n.01', 'name': 'sweet_cider'}, {'id': 14222, 'synset': 'mulled_cider.n.01', 'name': 'mulled_cider'}, {'id': 14223, 'synset': 'perry.n.04', 'name': 'perry'}, {'id': 14224, 'synset': 'rotgut.n.01', 'name': 'rotgut'}, {'id': 14225, 'synset': 'slug.n.05', 'name': 'slug'}, {'id': 14226, 'synset': 'criollo.n.02', 'name': 'criollo'}, {'id': 14227, 'synset': 'juice.n.01', 'name': 'juice'}, {'id': 14228, 'synset': 'nectar.n.02', 'name': 'nectar'}, {'id': 14229, 'synset': 'apple_juice.n.01', 'name': 'apple_juice'}, {'id': 14230, 'synset': 'cranberry_juice.n.01', 'name': 'cranberry_juice'}, {'id': 14231, 'synset': 'grape_juice.n.01', 'name': 'grape_juice'}, {'id': 14232, 'synset': 'must.n.02', 'name': 'must'}, {'id': 14233, 'synset': 'grapefruit_juice.n.01', 'name': 'grapefruit_juice'}, {'id': 14234, 'synset': 'frozen_orange_juice.n.01', 'name': 'frozen_orange_juice'}, {'id': 14235, 'synset': 'pineapple_juice.n.01', 'name': 'pineapple_juice'}, {'id': 14236, 'synset': 'lemon_juice.n.01', 'name': 'lemon_juice'}, {'id': 14237, 
'synset': 'lime_juice.n.01', 'name': 'lime_juice'}, {'id': 14238, 'synset': 'papaya_juice.n.01', 'name': 'papaya_juice'}, {'id': 14239, 'synset': 'tomato_juice.n.01', 'name': 'tomato_juice'}, {'id': 14240, 'synset': 'carrot_juice.n.01', 'name': 'carrot_juice'}, {'id': 14241, 'synset': 'v-8_juice.n.01', 'name': 'V-8_juice'}, {'id': 14242, 'synset': 'koumiss.n.01', 'name': 'koumiss'}, {'id': 14243, 'synset': 'fruit_drink.n.01', 'name': 'fruit_drink'}, {'id': 14244, 'synset': 'limeade.n.01', 'name': 'limeade'}, {'id': 14245, 'synset': 'orangeade.n.01', 'name': 'orangeade'}, {'id': 14246, 'synset': 'malted_milk.n.02', 'name': 'malted_milk'}, {'id': 14247, 'synset': 'mate.n.09', 'name': 'mate'}, {'id': 14248, 'synset': 'mulled_wine.n.01', 'name': 'mulled_wine'}, {'id': 14249, 'synset': 'negus.n.01', 'name': 'negus'}, {'id': 14250, 'synset': 'soft_drink.n.01', 'name': 'soft_drink'}, {'id': 14251, 'synset': 'birch_beer.n.01', 'name': 'birch_beer'}, {'id': 14252, 'synset': 'bitter_lemon.n.01', 'name': 'bitter_lemon'}, {'id': 14253, 'synset': 'cola.n.02', 'name': 'cola'}, {'id': 14254, 'synset': 'cream_soda.n.01', 'name': 'cream_soda'}, {'id': 14255, 'synset': 'egg_cream.n.01', 'name': 'egg_cream'}, {'id': 14256, 'synset': 'ginger_ale.n.01', 'name': 'ginger_ale'}, {'id': 14257, 'synset': 'orange_soda.n.01', 'name': 'orange_soda'}, {'id': 14258, 'synset': 'phosphate.n.02', 'name': 'phosphate'}, {'id': 14259, 'synset': 'coca_cola.n.01', 'name': 'Coca_Cola'}, {'id': 14260, 'synset': 'pepsi.n.01', 'name': 'Pepsi'}, {'id': 14261, 'synset': 'sarsaparilla.n.02', 'name': 'sarsaparilla'}, {'id': 14262, 'synset': 'tonic.n.01', 'name': 'tonic'}, {'id': 14263, 'synset': 'coffee_bean.n.01', 'name': 'coffee_bean'}, {'id': 14264, 'synset': 'coffee.n.01', 'name': 'coffee'}, {'id': 14265, 'synset': 'cafe_royale.n.01', 'name': 'cafe_royale'}, {'id': 14266, 'synset': 'fruit_punch.n.01', 'name': 'fruit_punch'}, {'id': 14267, 'synset': 'milk_punch.n.01', 'name': 'milk_punch'}, {'id': 14268, 'synset': 'mimosa.n.03', 'name': 'mimosa'}, {'id': 14269, 'synset': 'pina_colada.n.01', 'name': 'pina_colada'}, {'id': 14270, 'synset': 'punch.n.02', 'name': 'punch'}, {'id': 14271, 'synset': 'cup.n.06', 'name': 'cup'}, {'id': 14272, 'synset': 'champagne_cup.n.01', 'name': 'champagne_cup'}, {'id': 14273, 'synset': 'claret_cup.n.01', 'name': 'claret_cup'}, {'id': 14274, 'synset': 'wassail.n.01', 'name': 'wassail'}, {'id': 14275, 'synset': "planter's_punch.n.01", 'name': "planter's_punch"}, {'id': 14276, 'synset': 'white_russian.n.02', 'name': 'White_Russian'}, {'id': 14277, 'synset': 'fish_house_punch.n.01', 'name': 'fish_house_punch'}, {'id': 14278, 'synset': 'may_wine.n.01', 'name': 'May_wine'}, {'id': 14279, 'synset': 'eggnog.n.01', 'name': 'eggnog'}, {'id': 14280, 'synset': 'cassiri.n.01', 'name': 'cassiri'}, {'id': 14281, 'synset': 'spruce_beer.n.01', 'name': 'spruce_beer'}, {'id': 14282, 'synset': 'rickey.n.01', 'name': 'rickey'}, {'id': 14283, 'synset': 'gin_rickey.n.01', 'name': 'gin_rickey'}, {'id': 14284, 'synset': 'tea.n.05', 'name': 'tea'}, {'id': 14285, 'synset': 'tea.n.01', 'name': 'tea'}, {'id': 14286, 'synset': 'tea-like_drink.n.01', 'name': 'tea-like_drink'}, {'id': 14287, 'synset': 'cambric_tea.n.01', 'name': 'cambric_tea'}, {'id': 14288, 'synset': 'cuppa.n.01', 'name': 'cuppa'}, {'id': 14289, 'synset': 'herb_tea.n.01', 'name': 'herb_tea'}, {'id': 14290, 'synset': 'tisane.n.01', 'name': 'tisane'}, {'id': 14291, 'synset': 'camomile_tea.n.01', 'name': 'camomile_tea'}, {'id': 14292, 'synset': 'ice_tea.n.01', 'name': 
'ice_tea'}, {'id': 14293, 'synset': 'sun_tea.n.01', 'name': 'sun_tea'}, {'id': 14294, 'synset': 'black_tea.n.01', 'name': 'black_tea'}, {'id': 14295, 'synset': 'congou.n.01', 'name': 'congou'}, {'id': 14296, 'synset': 'darjeeling.n.01', 'name': 'Darjeeling'}, {'id': 14297, 'synset': 'orange_pekoe.n.01', 'name': 'orange_pekoe'}, {'id': 14298, 'synset': 'souchong.n.01', 'name': 'souchong'}, {'id': 14299, 'synset': 'green_tea.n.01', 'name': 'green_tea'}, {'id': 14300, 'synset': 'hyson.n.01', 'name': 'hyson'}, {'id': 14301, 'synset': 'oolong.n.01', 'name': 'oolong'}, {'id': 14302, 'synset': 'water.n.06', 'name': 'water'}, {'id': 14303, 'synset': 'bottled_water.n.01', 'name': 'bottled_water'}, {'id': 14304, 'synset': 'branch_water.n.01', 'name': 'branch_water'}, {'id': 14305, 'synset': 'spring_water.n.02', 'name': 'spring_water'}, {'id': 14306, 'synset': 'sugar_water.n.01', 'name': 'sugar_water'}, {'id': 14307, 'synset': 'drinking_water.n.01', 'name': 'drinking_water'}, {'id': 14308, 'synset': 'ice_water.n.01', 'name': 'ice_water'}, {'id': 14309, 'synset': 'soda_water.n.01', 'name': 'soda_water'}, {'id': 14310, 'synset': 'mineral_water.n.01', 'name': 'mineral_water'}, {'id': 14311, 'synset': 'seltzer.n.01', 'name': 'seltzer'}, {'id': 14312, 'synset': 'vichy_water.n.01', 'name': 'Vichy_water'}, {'id': 14313, 'synset': 'perishable.n.01', 'name': 'perishable'}, {'id': 14314, 'synset': 'couscous.n.01', 'name': 'couscous'}, {'id': 14315, 'synset': 'ramekin.n.01', 'name': 'ramekin'}, {'id': 14316, 'synset': 'multivitamin.n.01', 'name': 'multivitamin'}, {'id': 14317, 'synset': 'vitamin_pill.n.01', 'name': 'vitamin_pill'}, {'id': 14318, 'synset': 'soul_food.n.01', 'name': 'soul_food'}, {'id': 14319, 'synset': 'mold.n.06', 'name': 'mold'}, {'id': 14320, 'synset': 'people.n.01', 'name': 'people'}, {'id': 14321, 'synset': 'collection.n.01', 'name': 'collection'}, {'id': 14322, 'synset': 'book.n.07', 'name': 'book'}, {'id': 14323, 'synset': 'library.n.02', 'name': 'library'}, {'id': 14324, 'synset': 'baseball_club.n.01', 'name': 'baseball_club'}, {'id': 14325, 'synset': 'crowd.n.01', 'name': 'crowd'}, {'id': 14326, 'synset': 'class.n.02', 'name': 'class'}, {'id': 14327, 'synset': 'core.n.01', 'name': 'core'}, {'id': 14328, 'synset': 'concert_band.n.01', 'name': 'concert_band'}, {'id': 14329, 'synset': 'dance.n.02', 'name': 'dance'}, {'id': 14330, 'synset': 'wedding.n.03', 'name': 'wedding'}, {'id': 14331, 'synset': 'chain.n.01', 'name': 'chain'}, {'id': 14332, 'synset': 'power_breakfast.n.01', 'name': 'power_breakfast'}, {'id': 14333, 'synset': 'aerie.n.02', 'name': 'aerie'}, {'id': 14334, 'synset': 'agora.n.02', 'name': 'agora'}, {'id': 14335, 'synset': 'amusement_park.n.01', 'name': 'amusement_park'}, {'id': 14336, 'synset': 'aphelion.n.01', 'name': 'aphelion'}, {'id': 14337, 'synset': 'apron.n.02', 'name': 'apron'}, {'id': 14338, 'synset': 'interplanetary_space.n.01', 'name': 'interplanetary_space'}, {'id': 14339, 'synset': 'interstellar_space.n.01', 'name': 'interstellar_space'}, {'id': 14340, 'synset': 'intergalactic_space.n.01', 'name': 'intergalactic_space'}, {'id': 14341, 'synset': 'bush.n.02', 'name': 'bush'}, {'id': 14342, 'synset': 'semidesert.n.01', 'name': 'semidesert'}, {'id': 14343, 'synset': 'beam-ends.n.01', 'name': 'beam-ends'}, {'id': 14344, 'synset': 'bridgehead.n.02', 'name': 'bridgehead'}, {'id': 14345, 'synset': 'bus_stop.n.01', 'name': 'bus_stop'}, {'id': 14346, 'synset': 'campsite.n.01', 'name': 'campsite'}, {'id': 14347, 'synset': 'detention_basin.n.01', 'name': 
'detention_basin'}, {'id': 14348, 'synset': 'cemetery.n.01', 'name': 'cemetery'}, {'id': 14349, 'synset': 'trichion.n.01', 'name': 'trichion'}, {'id': 14350, 'synset': 'city.n.01', 'name': 'city'}, {'id': 14351, 'synset': 'business_district.n.01', 'name': 'business_district'}, {'id': 14352, 'synset': 'outskirts.n.01', 'name': 'outskirts'}, {'id': 14353, 'synset': 'borough.n.01', 'name': 'borough'}, {'id': 14354, 'synset': 'cow_pasture.n.01', 'name': 'cow_pasture'}, {'id': 14355, 'synset': 'crest.n.01', 'name': 'crest'}, {'id': 14356, 'synset': 'eparchy.n.02', 'name': 'eparchy'}, {'id': 14357, 'synset': 'suburb.n.01', 'name': 'suburb'}, {'id': 14358, 'synset': 'stockbroker_belt.n.01', 'name': 'stockbroker_belt'}, {'id': 14359, 'synset': 'crawlspace.n.01', 'name': 'crawlspace'}, {'id': 14360, 'synset': 'sheikdom.n.01', 'name': 'sheikdom'}, {'id': 14361, 'synset': 'residence.n.01', 'name': 'residence'}, {'id': 14362, 'synset': 'domicile.n.01', 'name': 'domicile'}, {'id': 14363, 'synset': 'dude_ranch.n.01', 'name': 'dude_ranch'}, {'id': 14364, 'synset': 'farmland.n.01', 'name': 'farmland'}, {'id': 14365, 'synset': 'midfield.n.01', 'name': 'midfield'}, {'id': 14366, 'synset': 'firebreak.n.01', 'name': 'firebreak'}, {'id': 14367, 'synset': 'flea_market.n.01', 'name': 'flea_market'}, {'id': 14368, 'synset': 'battlefront.n.01', 'name': 'battlefront'}, {'id': 14369, 'synset': 'garbage_heap.n.01', 'name': 'garbage_heap'}, {'id': 14370, 'synset': 'benthos.n.01', 'name': 'benthos'}, {'id': 14371, 'synset': 'goldfield.n.01', 'name': 'goldfield'}, {'id': 14372, 'synset': 'grainfield.n.01', 'name': 'grainfield'}, {'id': 14373, 'synset': 'half-mast.n.01', 'name': 'half-mast'}, {'id': 14374, 'synset': 'hemline.n.01', 'name': 'hemline'}, {'id': 14375, 'synset': 'heronry.n.01', 'name': 'heronry'}, {'id': 14376, 'synset': 'hipline.n.02', 'name': 'hipline'}, {'id': 14377, 'synset': 'hipline.n.01', 'name': 'hipline'}, {'id': 14378, 'synset': 'hole-in-the-wall.n.01', 'name': 'hole-in-the-wall'}, {'id': 14379, 'synset': 'junkyard.n.01', 'name': 'junkyard'}, {'id': 14380, 'synset': 'isoclinic_line.n.01', 'name': 'isoclinic_line'}, {'id': 14381, 'synset': 'littoral.n.01', 'name': 'littoral'}, {'id': 14382, 'synset': 'magnetic_pole.n.01', 'name': 'magnetic_pole'}, {'id': 14383, 'synset': 'grassland.n.01', 'name': 'grassland'}, {'id': 14384, 'synset': 'mecca.n.02', 'name': 'mecca'}, {'id': 14385, 'synset': "observer's_meridian.n.01", 'name': "observer's_meridian"}, {'id': 14386, 'synset': 'prime_meridian.n.01', 'name': 'prime_meridian'}, {'id': 14387, 'synset': 'nombril.n.01', 'name': 'nombril'}, {'id': 14388, 'synset': 'no-parking_zone.n.01', 'name': 'no-parking_zone'}, {'id': 14389, 'synset': 'outdoors.n.01', 'name': 'outdoors'}, {'id': 14390, 'synset': 'fairground.n.01', 'name': 'fairground'}, {'id': 14391, 'synset': 'pasture.n.01', 'name': 'pasture'}, {'id': 14392, 'synset': 'perihelion.n.01', 'name': 'perihelion'}, {'id': 14393, 'synset': 'periselene.n.01', 'name': 'periselene'}, {'id': 14394, 'synset': 'locus_of_infection.n.01', 'name': 'locus_of_infection'}, {'id': 14395, 'synset': 'kasbah.n.01', 'name': 'kasbah'}, {'id': 14396, 'synset': 'waterfront.n.01', 'name': 'waterfront'}, {'id': 14397, 'synset': 'resort.n.01', 'name': 'resort'}, {'id': 14398, 'synset': 'resort_area.n.01', 'name': 'resort_area'}, {'id': 14399, 'synset': 'rough.n.01', 'name': 'rough'}, {'id': 14400, 'synset': 'ashram.n.02', 'name': 'ashram'}, {'id': 14401, 'synset': 'harborage.n.01', 'name': 'harborage'}, {'id': 14402, 'synset': 
'scrubland.n.01', 'name': 'scrubland'}, {'id': 14403, 'synset': 'weald.n.01', 'name': 'weald'}, {'id': 14404, 'synset': 'wold.n.01', 'name': 'wold'}, {'id': 14405, 'synset': 'schoolyard.n.01', 'name': 'schoolyard'}, {'id': 14406, 'synset': 'showplace.n.01', 'name': 'showplace'}, {'id': 14407, 'synset': 'bedside.n.01', 'name': 'bedside'}, {'id': 14408, 'synset': 'sideline.n.01', 'name': 'sideline'}, {'id': 14409, 'synset': 'ski_resort.n.01', 'name': 'ski_resort'}, {'id': 14410, 'synset': 'soil_horizon.n.01', 'name': 'soil_horizon'}, {'id': 14411, 'synset': 'geological_horizon.n.01', 'name': 'geological_horizon'}, {'id': 14412, 'synset': 'coal_seam.n.01', 'name': 'coal_seam'}, {'id': 14413, 'synset': 'coalface.n.01', 'name': 'coalface'}, {'id': 14414, 'synset': 'field.n.14', 'name': 'field'}, {'id': 14415, 'synset': 'oilfield.n.01', 'name': 'oilfield'}, {'id': 14416, 'synset': 'temperate_zone.n.01', 'name': 'Temperate_Zone'}, {'id': 14417, 'synset': 'terreplein.n.01', 'name': 'terreplein'}, {'id': 14418, 'synset': 'three-mile_limit.n.01', 'name': 'three-mile_limit'}, {'id': 14419, 'synset': 'desktop.n.01', 'name': 'desktop'}, {'id': 14420, 'synset': 'top.n.01', 'name': 'top'}, {'id': 14421, 'synset': 'kampong.n.01', 'name': 'kampong'}, {'id': 14422, 'synset': 'subtropics.n.01', 'name': 'subtropics'}, {'id': 14423, 'synset': 'barrio.n.02', 'name': 'barrio'}, {'id': 14424, 'synset': 'veld.n.01', 'name': 'veld'}, {'id': 14425, 'synset': 'vertex.n.02', 'name': 'vertex'}, {'id': 14426, 'synset': 'waterline.n.01', 'name': 'waterline'}, {'id': 14427, 'synset': 'high-water_mark.n.01', 'name': 'high-water_mark'}, {'id': 14428, 'synset': 'low-water_mark.n.02', 'name': 'low-water_mark'}, {'id': 14429, 'synset': 'continental_divide.n.01', 'name': 'continental_divide'}, {'id': 14430, 'synset': 'zodiac.n.01', 'name': 'zodiac'}, {'id': 14431, 'synset': 'aegean_island.n.01', 'name': 'Aegean_island'}, {'id': 14432, 'synset': 'sultanate.n.01', 'name': 'sultanate'}, {'id': 14433, 'synset': 'swiss_canton.n.01', 'name': 'Swiss_canton'}, {'id': 14434, 'synset': 'abyssal_zone.n.01', 'name': 'abyssal_zone'}, {'id': 14435, 'synset': 'aerie.n.01', 'name': 'aerie'}, {'id': 14436, 'synset': 'air_bubble.n.01', 'name': 'air_bubble'}, {'id': 14437, 'synset': 'alluvial_flat.n.01', 'name': 'alluvial_flat'}, {'id': 14438, 'synset': 'alp.n.01', 'name': 'alp'}, {'id': 14439, 'synset': 'alpine_glacier.n.01', 'name': 'Alpine_glacier'}, {'id': 14440, 'synset': 'anthill.n.01', 'name': 'anthill'}, {'id': 14441, 'synset': 'aquifer.n.01', 'name': 'aquifer'}, {'id': 14442, 'synset': 'archipelago.n.01', 'name': 'archipelago'}, {'id': 14443, 'synset': 'arete.n.01', 'name': 'arete'}, {'id': 14444, 'synset': 'arroyo.n.01', 'name': 'arroyo'}, {'id': 14445, 'synset': 'ascent.n.01', 'name': 'ascent'}, {'id': 14446, 'synset': 'asterism.n.02', 'name': 'asterism'}, {'id': 14447, 'synset': 'asthenosphere.n.01', 'name': 'asthenosphere'}, {'id': 14448, 'synset': 'atoll.n.01', 'name': 'atoll'}, {'id': 14449, 'synset': 'bank.n.03', 'name': 'bank'}, {'id': 14450, 'synset': 'bank.n.01', 'name': 'bank'}, {'id': 14451, 'synset': 'bar.n.08', 'name': 'bar'}, {'id': 14452, 'synset': 'barbecue_pit.n.01', 'name': 'barbecue_pit'}, {'id': 14453, 'synset': 'barrier_reef.n.01', 'name': 'barrier_reef'}, {'id': 14454, 'synset': 'baryon.n.01', 'name': 'baryon'}, {'id': 14455, 'synset': 'basin.n.03', 'name': 'basin'}, {'id': 14456, 'synset': 'beach.n.01', 'name': 'beach'}, {'id': 14457, 'synset': 'honeycomb.n.01', 'name': 'honeycomb'}, {'id': 14458, 'synset': 
'belay.n.01', 'name': 'belay'}, {'id': 14459, 'synset': 'ben.n.01', 'name': 'ben'}, {'id': 14460, 'synset': 'berm.n.01', 'name': 'berm'}, {'id': 14461, 'synset': 'bladder_stone.n.01', 'name': 'bladder_stone'}, {'id': 14462, 'synset': 'bluff.n.01', 'name': 'bluff'}, {'id': 14463, 'synset': 'borrow_pit.n.01', 'name': 'borrow_pit'}, {'id': 14464, 'synset': 'brae.n.01', 'name': 'brae'}, {'id': 14465, 'synset': 'bubble.n.01', 'name': 'bubble'}, {'id': 14466, 'synset': 'burrow.n.01', 'name': 'burrow'}, {'id': 14467, 'synset': 'butte.n.01', 'name': 'butte'}, {'id': 14468, 'synset': 'caldera.n.01', 'name': 'caldera'}, {'id': 14469, 'synset': 'canyon.n.01', 'name': 'canyon'}, {'id': 14470, 'synset': 'canyonside.n.01', 'name': 'canyonside'}, {'id': 14471, 'synset': 'cave.n.01', 'name': 'cave'}, {'id': 14472, 'synset': 'cavern.n.02', 'name': 'cavern'}, {'id': 14473, 'synset': 'chasm.n.01', 'name': 'chasm'}, {'id': 14474, 'synset': 'cirque.n.01', 'name': 'cirque'}, {'id': 14475, 'synset': 'cliff.n.01', 'name': 'cliff'}, {'id': 14476, 'synset': 'cloud.n.02', 'name': 'cloud'}, {'id': 14477, 'synset': 'coast.n.02', 'name': 'coast'}, {'id': 14478, 'synset': 'coastland.n.01', 'name': 'coastland'}, {'id': 14479, 'synset': 'col.n.01', 'name': 'col'}, {'id': 14480, 'synset': 'collector.n.03', 'name': 'collector'}, {'id': 14481, 'synset': 'comet.n.01', 'name': 'comet'}, {'id': 14482, 'synset': 'continental_glacier.n.01', 'name': 'continental_glacier'}, {'id': 14483, 'synset': 'coral_reef.n.01', 'name': 'coral_reef'}, {'id': 14484, 'synset': 'cove.n.02', 'name': 'cove'}, {'id': 14485, 'synset': 'crag.n.01', 'name': 'crag'}, {'id': 14486, 'synset': 'crater.n.03', 'name': 'crater'}, {'id': 14487, 'synset': 'cultivated_land.n.01', 'name': 'cultivated_land'}, {'id': 14488, 'synset': 'dale.n.01', 'name': 'dale'}, {'id': 14489, 'synset': 'defile.n.01', 'name': 'defile'}, {'id': 14490, 'synset': 'delta.n.01', 'name': 'delta'}, {'id': 14491, 'synset': 'descent.n.05', 'name': 'descent'}, {'id': 14492, 'synset': 'diapir.n.01', 'name': 'diapir'}, {'id': 14493, 'synset': 'divot.n.02', 'name': 'divot'}, {'id': 14494, 'synset': 'divot.n.01', 'name': 'divot'}, {'id': 14495, 'synset': 'down.n.04', 'name': 'down'}, {'id': 14496, 'synset': 'downhill.n.01', 'name': 'downhill'}, {'id': 14497, 'synset': 'draw.n.01', 'name': 'draw'}, {'id': 14498, 'synset': 'drey.n.01', 'name': 'drey'}, {'id': 14499, 'synset': 'drumlin.n.01', 'name': 'drumlin'}, {'id': 14500, 'synset': 'dune.n.01', 'name': 'dune'}, {'id': 14501, 'synset': 'escarpment.n.01', 'name': 'escarpment'}, {'id': 14502, 'synset': 'esker.n.01', 'name': 'esker'}, {'id': 14503, 'synset': 'fireball.n.03', 'name': 'fireball'}, {'id': 14504, 'synset': 'flare_star.n.01', 'name': 'flare_star'}, {'id': 14505, 'synset': 'floor.n.04', 'name': 'floor'}, {'id': 14506, 'synset': 'fomite.n.01', 'name': 'fomite'}, {'id': 14507, 'synset': 'foothill.n.01', 'name': 'foothill'}, {'id': 14508, 'synset': 'footwall.n.01', 'name': 'footwall'}, {'id': 14509, 'synset': 'foreland.n.02', 'name': 'foreland'}, {'id': 14510, 'synset': 'foreshore.n.01', 'name': 'foreshore'}, {'id': 14511, 'synset': 'gauge_boson.n.01', 'name': 'gauge_boson'}, {'id': 14512, 'synset': 'geological_formation.n.01', 'name': 'geological_formation'}, {'id': 14513, 'synset': 'geyser.n.01', 'name': 'geyser'}, {'id': 14514, 'synset': 'glacier.n.01', 'name': 'glacier'}, {'id': 14515, 'synset': 'glen.n.01', 'name': 'glen'}, {'id': 14516, 'synset': 'gopher_hole.n.01', 'name': 'gopher_hole'}, {'id': 14517, 'synset': 'gorge.n.01', 'name': 
'gorge'}, {'id': 14518, 'synset': 'grotto.n.01', 'name': 'grotto'}, {'id': 14519, 'synset': 'growler.n.02', 'name': 'growler'}, {'id': 14520, 'synset': 'gulch.n.01', 'name': 'gulch'}, {'id': 14521, 'synset': 'gully.n.01', 'name': 'gully'}, {'id': 14522, 'synset': 'hail.n.02', 'name': 'hail'}, {'id': 14523, 'synset': 'highland.n.01', 'name': 'highland'}, {'id': 14524, 'synset': 'hill.n.01', 'name': 'hill'}, {'id': 14525, 'synset': 'hillside.n.01', 'name': 'hillside'}, {'id': 14526, 'synset': 'hole.n.05', 'name': 'hole'}, {'id': 14527, 'synset': 'hollow.n.02', 'name': 'hollow'}, {'id': 14528, 'synset': 'hot_spring.n.01', 'name': 'hot_spring'}, {'id': 14529, 'synset': 'iceberg.n.01', 'name': 'iceberg'}, {'id': 14530, 'synset': 'icecap.n.01', 'name': 'icecap'}, {'id': 14531, 'synset': 'ice_field.n.01', 'name': 'ice_field'}, {'id': 14532, 'synset': 'ice_floe.n.01', 'name': 'ice_floe'}, {'id': 14533, 'synset': 'ice_mass.n.01', 'name': 'ice_mass'}, {'id': 14534, 'synset': 'inclined_fault.n.01', 'name': 'inclined_fault'}, {'id': 14535, 'synset': 'ion.n.01', 'name': 'ion'}, {'id': 14536, 'synset': 'isthmus.n.01', 'name': 'isthmus'}, {'id': 14537, 'synset': 'kidney_stone.n.01', 'name': 'kidney_stone'}, {'id': 14538, 'synset': 'knoll.n.01', 'name': 'knoll'}, {'id': 14539, 'synset': 'kopje.n.01', 'name': 'kopje'}, {'id': 14540, 'synset': 'kuiper_belt.n.01', 'name': 'Kuiper_belt'}, {'id': 14541, 'synset': 'lake_bed.n.01', 'name': 'lake_bed'}, {'id': 14542, 'synset': 'lakefront.n.01', 'name': 'lakefront'}, {'id': 14543, 'synset': 'lakeside.n.01', 'name': 'lakeside'}, {'id': 14544, 'synset': 'landfall.n.01', 'name': 'landfall'}, {'id': 14545, 'synset': 'landfill.n.01', 'name': 'landfill'}, {'id': 14546, 'synset': 'lather.n.04', 'name': 'lather'}, {'id': 14547, 'synset': 'leak.n.01', 'name': 'leak'}, {'id': 14548, 'synset': 'ledge.n.01', 'name': 'ledge'}, {'id': 14549, 'synset': 'lepton.n.02', 'name': 'lepton'}, {'id': 14550, 'synset': 'lithosphere.n.01', 'name': 'lithosphere'}, {'id': 14551, 'synset': 'lowland.n.01', 'name': 'lowland'}, {'id': 14552, 'synset': 'lunar_crater.n.01', 'name': 'lunar_crater'}, {'id': 14553, 'synset': 'maar.n.01', 'name': 'maar'}, {'id': 14554, 'synset': 'massif.n.01', 'name': 'massif'}, {'id': 14555, 'synset': 'meander.n.01', 'name': 'meander'}, {'id': 14556, 'synset': 'mesa.n.01', 'name': 'mesa'}, {'id': 14557, 'synset': 'meteorite.n.01', 'name': 'meteorite'}, {'id': 14558, 'synset': 'microfossil.n.01', 'name': 'microfossil'}, {'id': 14559, 'synset': 'midstream.n.01', 'name': 'midstream'}, {'id': 14560, 'synset': 'molehill.n.01', 'name': 'molehill'}, {'id': 14561, 'synset': 'monocline.n.01', 'name': 'monocline'}, {'id': 14562, 'synset': 'mountain.n.01', 'name': 'mountain'}, {'id': 14563, 'synset': 'mountainside.n.01', 'name': 'mountainside'}, {'id': 14564, 'synset': 'mouth.n.04', 'name': 'mouth'}, {'id': 14565, 'synset': 'mull.n.01', 'name': 'mull'}, {'id': 14566, 'synset': 'natural_depression.n.01', 'name': 'natural_depression'}, {'id': 14567, 'synset': 'natural_elevation.n.01', 'name': 'natural_elevation'}, {'id': 14568, 'synset': 'nullah.n.01', 'name': 'nullah'}, {'id': 14569, 'synset': 'ocean.n.01', 'name': 'ocean'}, {'id': 14570, 'synset': 'ocean_floor.n.01', 'name': 'ocean_floor'}, {'id': 14571, 'synset': 'oceanfront.n.01', 'name': 'oceanfront'}, {'id': 14572, 'synset': 'outcrop.n.01', 'name': 'outcrop'}, {'id': 14573, 'synset': 'oxbow.n.01', 'name': 'oxbow'}, {'id': 14574, 'synset': 'pallasite.n.01', 'name': 'pallasite'}, {'id': 14575, 'synset': 'perforation.n.02', 
'name': 'perforation'}, {'id': 14576, 'synset': 'photosphere.n.01', 'name': 'photosphere'}, {'id': 14577, 'synset': 'piedmont.n.02', 'name': 'piedmont'}, {'id': 14578, 'synset': 'piedmont_glacier.n.01', 'name': 'Piedmont_glacier'}, {'id': 14579, 'synset': 'pinetum.n.01', 'name': 'pinetum'}, {'id': 14580, 'synset': 'plage.n.01', 'name': 'plage'}, {'id': 14581, 'synset': 'plain.n.01', 'name': 'plain'}, {'id': 14582, 'synset': 'point.n.11', 'name': 'point'}, {'id': 14583, 'synset': 'polar_glacier.n.01', 'name': 'polar_glacier'}, {'id': 14584, 'synset': 'pothole.n.01', 'name': 'pothole'}, {'id': 14585, 'synset': 'precipice.n.01', 'name': 'precipice'}, {'id': 14586, 'synset': 'promontory.n.01', 'name': 'promontory'}, {'id': 14587, 'synset': 'ptyalith.n.01', 'name': 'ptyalith'}, {'id': 14588, 'synset': 'pulsar.n.01', 'name': 'pulsar'}, {'id': 14589, 'synset': 'quicksand.n.02', 'name': 'quicksand'}, {'id': 14590, 'synset': 'rabbit_burrow.n.01', 'name': 'rabbit_burrow'}, {'id': 14591, 'synset': 'radiator.n.01', 'name': 'radiator'}, {'id': 14592, 'synset': 'rainbow.n.01', 'name': 'rainbow'}, {'id': 14593, 'synset': 'range.n.04', 'name': 'range'}, {'id': 14594, 'synset': 'rangeland.n.01', 'name': 'rangeland'}, {'id': 14595, 'synset': 'ravine.n.01', 'name': 'ravine'}, {'id': 14596, 'synset': 'reef.n.01', 'name': 'reef'}, {'id': 14597, 'synset': 'ridge.n.01', 'name': 'ridge'}, {'id': 14598, 'synset': 'ridge.n.04', 'name': 'ridge'}, {'id': 14599, 'synset': 'rift_valley.n.01', 'name': 'rift_valley'}, {'id': 14600, 'synset': 'riparian_forest.n.01', 'name': 'riparian_forest'}, {'id': 14601, 'synset': 'ripple_mark.n.01', 'name': 'ripple_mark'}, {'id': 14602, 'synset': 'riverbank.n.01', 'name': 'riverbank'}, {'id': 14603, 'synset': 'riverbed.n.01', 'name': 'riverbed'}, {'id': 14604, 'synset': 'rock.n.01', 'name': 'rock'}, {'id': 14605, 'synset': 'roof.n.03', 'name': 'roof'}, {'id': 14606, 'synset': 'saltpan.n.01', 'name': 'saltpan'}, {'id': 14607, 'synset': 'sandbank.n.01', 'name': 'sandbank'}, {'id': 14608, 'synset': 'sandbar.n.01', 'name': 'sandbar'}, {'id': 14609, 'synset': 'sandpit.n.01', 'name': 'sandpit'}, {'id': 14610, 'synset': 'sanitary_landfill.n.01', 'name': 'sanitary_landfill'}, {'id': 14611, 'synset': 'sawpit.n.01', 'name': 'sawpit'}, {'id': 14612, 'synset': 'scablands.n.01', 'name': 'scablands'}, {'id': 14613, 'synset': 'seashore.n.01', 'name': 'seashore'}, {'id': 14614, 'synset': 'seaside.n.01', 'name': 'seaside'}, {'id': 14615, 'synset': 'seif_dune.n.01', 'name': 'seif_dune'}, {'id': 14616, 'synset': 'shell.n.06', 'name': 'shell'}, {'id': 14617, 'synset': 'shiner.n.02', 'name': 'shiner'}, {'id': 14618, 'synset': 'shoal.n.01', 'name': 'shoal'}, {'id': 14619, 'synset': 'shore.n.01', 'name': 'shore'}, {'id': 14620, 'synset': 'shoreline.n.01', 'name': 'shoreline'}, {'id': 14621, 'synset': 'sinkhole.n.01', 'name': 'sinkhole'}, {'id': 14622, 'synset': 'ski_slope.n.01', 'name': 'ski_slope'}, {'id': 14623, 'synset': 'sky.n.01', 'name': 'sky'}, {'id': 14624, 'synset': 'slope.n.01', 'name': 'slope'}, {'id': 14625, 'synset': 'snowcap.n.01', 'name': 'snowcap'}, {'id': 14626, 'synset': 'snowdrift.n.01', 'name': 'snowdrift'}, {'id': 14627, 'synset': 'snowfield.n.01', 'name': 'snowfield'}, {'id': 14628, 'synset': 'soapsuds.n.01', 'name': 'soapsuds'}, {'id': 14629, 'synset': 'spit.n.01', 'name': 'spit'}, {'id': 14630, 'synset': 'spoor.n.01', 'name': 'spoor'}, {'id': 14631, 'synset': 'spume.n.01', 'name': 'spume'}, {'id': 14632, 'synset': 'star.n.03', 'name': 'star'}, {'id': 14633, 'synset': 'steep.n.01', 
'name': 'steep'}, {'id': 14634, 'synset': 'steppe.n.01', 'name': 'steppe'}, {'id': 14635, 'synset': 'strand.n.05', 'name': 'strand'}, {'id': 14636, 'synset': 'streambed.n.01', 'name': 'streambed'}, {'id': 14637, 'synset': 'sun.n.01', 'name': 'sun'}, {'id': 14638, 'synset': 'supernova.n.01', 'name': 'supernova'}, {'id': 14639, 'synset': 'swale.n.01', 'name': 'swale'}, {'id': 14640, 'synset': 'swamp.n.01', 'name': 'swamp'}, {'id': 14641, 'synset': 'swell.n.02', 'name': 'swell'}, {'id': 14642, 'synset': 'tableland.n.01', 'name': 'tableland'}, {'id': 14643, 'synset': 'talus.n.01', 'name': 'talus'}, {'id': 14644, 'synset': 'tangle.n.01', 'name': 'tangle'}, {'id': 14645, 'synset': 'tar_pit.n.01', 'name': 'tar_pit'}, {'id': 14646, 'synset': 'terrace.n.02', 'name': 'terrace'}, {'id': 14647, 'synset': 'tidal_basin.n.01', 'name': 'tidal_basin'}, {'id': 14648, 'synset': 'tideland.n.01', 'name': 'tideland'}, {'id': 14649, 'synset': 'tor.n.02', 'name': 'tor'}, {'id': 14650, 'synset': 'tor.n.01', 'name': 'tor'}, {'id': 14651, 'synset': 'trapezium.n.02', 'name': 'Trapezium'}, {'id': 14652, 'synset': 'troposphere.n.01', 'name': 'troposphere'}, {'id': 14653, 'synset': 'tundra.n.01', 'name': 'tundra'}, {'id': 14654, 'synset': 'twinkler.n.01', 'name': 'twinkler'}, {'id': 14655, 'synset': 'uphill.n.01', 'name': 'uphill'}, {'id': 14656, 'synset': 'urolith.n.01', 'name': 'urolith'}, {'id': 14657, 'synset': 'valley.n.01', 'name': 'valley'}, {'id': 14658, 'synset': 'vehicle-borne_transmission.n.01', 'name': 'vehicle-borne_transmission'}, {'id': 14659, 'synset': 'vein.n.04', 'name': 'vein'}, {'id': 14660, 'synset': 'volcanic_crater.n.01', 'name': 'volcanic_crater'}, {'id': 14661, 'synset': 'volcano.n.02', 'name': 'volcano'}, {'id': 14662, 'synset': 'wadi.n.01', 'name': 'wadi'}, {'id': 14663, 'synset': 'wall.n.05', 'name': 'wall'}, {'id': 14664, 'synset': 'warren.n.03', 'name': 'warren'}, {'id': 14665, 'synset': "wasp's_nest.n.01", 'name': "wasp's_nest"}, {'id': 14666, 'synset': 'watercourse.n.01', 'name': 'watercourse'}, {'id': 14667, 'synset': 'waterside.n.01', 'name': 'waterside'}, {'id': 14668, 'synset': 'water_table.n.01', 'name': 'water_table'}, {'id': 14669, 'synset': 'whinstone.n.01', 'name': 'whinstone'}, {'id': 14670, 'synset': 'wormcast.n.02', 'name': 'wormcast'}, {'id': 14671, 'synset': 'xenolith.n.01', 'name': 'xenolith'}, {'id': 14672, 'synset': 'circe.n.01', 'name': 'Circe'}, {'id': 14673, 'synset': 'gryphon.n.01', 'name': 'gryphon'}, {'id': 14674, 'synset': 'spiritual_leader.n.01', 'name': 'spiritual_leader'}, {'id': 14675, 'synset': 'messiah.n.01', 'name': 'messiah'}, {'id': 14676, 'synset': 'rhea_silvia.n.01', 'name': 'Rhea_Silvia'}, {'id': 14677, 'synset': 'number_one.n.01', 'name': 'number_one'}, {'id': 14678, 'synset': 'adventurer.n.01', 'name': 'adventurer'}, {'id': 14679, 'synset': 'anomaly.n.02', 'name': 'anomaly'}, {'id': 14680, 'synset': 'appointee.n.02', 'name': 'appointee'}, {'id': 14681, 'synset': 'argonaut.n.01', 'name': 'argonaut'}, {'id': 14682, 'synset': 'ashkenazi.n.01', 'name': 'Ashkenazi'}, {'id': 14683, 'synset': 'benefactor.n.01', 'name': 'benefactor'}, {'id': 14684, 'synset': 'color-blind_person.n.01', 'name': 'color-blind_person'}, {'id': 14685, 'synset': 'commoner.n.01', 'name': 'commoner'}, {'id': 14686, 'synset': 'conservator.n.02', 'name': 'conservator'}, {'id': 14687, 'synset': 'contrarian.n.01', 'name': 'contrarian'}, {'id': 14688, 'synset': 'contadino.n.01', 'name': 'contadino'}, {'id': 14689, 'synset': 'contestant.n.01', 'name': 'contestant'}, {'id': 14690, 'synset': 
'cosigner.n.01', 'name': 'cosigner'}, {'id': 14691, 'synset': 'discussant.n.01', 'name': 'discussant'}, {'id': 14692, 'synset': 'enologist.n.01', 'name': 'enologist'}, {'id': 14693, 'synset': 'entertainer.n.01', 'name': 'entertainer'}, {'id': 14694, 'synset': 'eulogist.n.01', 'name': 'eulogist'}, {'id': 14695, 'synset': 'ex-gambler.n.01', 'name': 'ex-gambler'}, {'id': 14696, 'synset': 'experimenter.n.01', 'name': 'experimenter'}, {'id': 14697, 'synset': 'experimenter.n.02', 'name': 'experimenter'}, {'id': 14698, 'synset': 'exponent.n.02', 'name': 'exponent'}, {'id': 14699, 'synset': 'ex-president.n.01', 'name': 'ex-president'}, {'id': 14700, 'synset': 'face.n.05', 'name': 'face'}, {'id': 14701, 'synset': 'female.n.02', 'name': 'female'}, {'id': 14702, 'synset': 'finisher.n.04', 'name': 'finisher'}, {'id': 14703, 'synset': 'inhabitant.n.01', 'name': 'inhabitant'}, {'id': 14704, 'synset': 'native.n.01', 'name': 'native'}, {'id': 14705, 'synset': 'native.n.02', 'name': 'native'}, {'id': 14706, 'synset': 'juvenile.n.01', 'name': 'juvenile'}, {'id': 14707, 'synset': 'lover.n.01', 'name': 'lover'}, {'id': 14708, 'synset': 'male.n.02', 'name': 'male'}, {'id': 14709, 'synset': 'mediator.n.01', 'name': 'mediator'}, {'id': 14710, 'synset': 'mediatrix.n.01', 'name': 'mediatrix'}, {'id': 14711, 'synset': 'national.n.01', 'name': 'national'}, {'id': 14712, 'synset': 'peer.n.01', 'name': 'peer'}, {'id': 14713, 'synset': 'prize_winner.n.01', 'name': 'prize_winner'}, {'id': 14714, 'synset': 'recipient.n.01', 'name': 'recipient'}, {'id': 14715, 'synset': 'religionist.n.01', 'name': 'religionist'}, {'id': 14716, 'synset': 'sensualist.n.01', 'name': 'sensualist'}, {'id': 14717, 'synset': 'traveler.n.01', 'name': 'traveler'}, {'id': 14718, 'synset': 'unwelcome_person.n.01', 'name': 'unwelcome_person'}, {'id': 14719, 'synset': 'unskilled_person.n.01', 'name': 'unskilled_person'}, {'id': 14720, 'synset': 'worker.n.01', 'name': 'worker'}, {'id': 14721, 'synset': 'wrongdoer.n.01', 'name': 'wrongdoer'}, {'id': 14722, 'synset': 'black_african.n.01', 'name': 'Black_African'}, {'id': 14723, 'synset': 'afrikaner.n.01', 'name': 'Afrikaner'}, {'id': 14724, 'synset': 'aryan.n.01', 'name': 'Aryan'}, {'id': 14725, 'synset': 'black.n.05', 'name': 'Black'}, {'id': 14726, 'synset': 'black_woman.n.01', 'name': 'Black_woman'}, {'id': 14727, 'synset': 'mulatto.n.01', 'name': 'mulatto'}, {'id': 14728, 'synset': 'white.n.01', 'name': 'White'}, {'id': 14729, 'synset': 'circassian.n.01', 'name': 'Circassian'}, {'id': 14730, 'synset': 'semite.n.01', 'name': 'Semite'}, {'id': 14731, 'synset': 'chaldean.n.02', 'name': 'Chaldean'}, {'id': 14732, 'synset': 'elamite.n.01', 'name': 'Elamite'}, {'id': 14733, 'synset': 'white_man.n.01', 'name': 'white_man'}, {'id': 14734, 'synset': 'wasp.n.01', 'name': 'WASP'}, {'id': 14735, 'synset': 'gook.n.02', 'name': 'gook'}, {'id': 14736, 'synset': 'mongol.n.01', 'name': 'Mongol'}, {'id': 14737, 'synset': 'tatar.n.01', 'name': 'Tatar'}, {'id': 14738, 'synset': 'nahuatl.n.01', 'name': 'Nahuatl'}, {'id': 14739, 'synset': 'aztec.n.01', 'name': 'Aztec'}, {'id': 14740, 'synset': 'olmec.n.01', 'name': 'Olmec'}, {'id': 14741, 'synset': 'biloxi.n.01', 'name': 'Biloxi'}, {'id': 14742, 'synset': 'blackfoot.n.01', 'name': 'Blackfoot'}, {'id': 14743, 'synset': 'brule.n.01', 'name': 'Brule'}, {'id': 14744, 'synset': 'caddo.n.01', 'name': 'Caddo'}, {'id': 14745, 'synset': 'cheyenne.n.03', 'name': 'Cheyenne'}, {'id': 14746, 'synset': 'chickasaw.n.01', 'name': 'Chickasaw'}, {'id': 14747, 'synset': 'cocopa.n.01', 
'name': 'Cocopa'}, {'id': 14748, 'synset': 'comanche.n.01', 'name': 'Comanche'}, {'id': 14749, 'synset': 'creek.n.02', 'name': 'Creek'}, {'id': 14750, 'synset': 'delaware.n.02', 'name': 'Delaware'}, {'id': 14751, 'synset': 'diegueno.n.01', 'name': 'Diegueno'}, {'id': 14752, 'synset': 'esselen.n.01', 'name': 'Esselen'}, {'id': 14753, 'synset': 'eyeish.n.01', 'name': 'Eyeish'}, {'id': 14754, 'synset': 'havasupai.n.01', 'name': 'Havasupai'}, {'id': 14755, 'synset': 'hunkpapa.n.01', 'name': 'Hunkpapa'}, {'id': 14756, 'synset': 'iowa.n.01', 'name': 'Iowa'}, {'id': 14757, 'synset': 'kalapooia.n.01', 'name': 'Kalapooia'}, {'id': 14758, 'synset': 'kamia.n.01', 'name': 'Kamia'}, {'id': 14759, 'synset': 'kekchi.n.01', 'name': 'Kekchi'}, {'id': 14760, 'synset': 'kichai.n.01', 'name': 'Kichai'}, {'id': 14761, 'synset': 'kickapoo.n.01', 'name': 'Kickapoo'}, {'id': 14762, 'synset': 'kiliwa.n.01', 'name': 'Kiliwa'}, {'id': 14763, 'synset': 'malecite.n.01', 'name': 'Malecite'}, {'id': 14764, 'synset': 'maricopa.n.01', 'name': 'Maricopa'}, {'id': 14765, 'synset': 'mohican.n.01', 'name': 'Mohican'}, {'id': 14766, 'synset': 'muskhogean.n.01', 'name': 'Muskhogean'}, {'id': 14767, 'synset': 'navaho.n.01', 'name': 'Navaho'}, {'id': 14768, 'synset': 'nootka.n.01', 'name': 'Nootka'}, {'id': 14769, 'synset': 'oglala.n.01', 'name': 'Oglala'}, {'id': 14770, 'synset': 'osage.n.01', 'name': 'Osage'}, {'id': 14771, 'synset': 'oneida.n.01', 'name': 'Oneida'}, {'id': 14772, 'synset': 'paiute.n.01', 'name': 'Paiute'}, {'id': 14773, 'synset': 'passamaquody.n.01', 'name': 'Passamaquody'}, {'id': 14774, 'synset': 'penobscot.n.01', 'name': 'Penobscot'}, {'id': 14775, 'synset': 'penutian.n.02', 'name': 'Penutian'}, {'id': 14776, 'synset': 'potawatomi.n.01', 'name': 'Potawatomi'}, {'id': 14777, 'synset': 'powhatan.n.02', 'name': 'Powhatan'}, {'id': 14778, 'synset': 'kachina.n.02', 'name': 'kachina'}, {'id': 14779, 'synset': 'salish.n.02', 'name': 'Salish'}, {'id': 14780, 'synset': 'shahaptian.n.01', 'name': 'Shahaptian'}, {'id': 14781, 'synset': 'shasta.n.01', 'name': 'Shasta'}, {'id': 14782, 'synset': 'shawnee.n.01', 'name': 'Shawnee'}, {'id': 14783, 'synset': 'sihasapa.n.01', 'name': 'Sihasapa'}, {'id': 14784, 'synset': 'teton.n.01', 'name': 'Teton'}, {'id': 14785, 'synset': 'taracahitian.n.01', 'name': 'Taracahitian'}, {'id': 14786, 'synset': 'tarahumara.n.01', 'name': 'Tarahumara'}, {'id': 14787, 'synset': 'tuscarora.n.01', 'name': 'Tuscarora'}, {'id': 14788, 'synset': 'tutelo.n.01', 'name': 'Tutelo'}, {'id': 14789, 'synset': 'yana.n.01', 'name': 'Yana'}, {'id': 14790, 'synset': 'yavapai.n.01', 'name': 'Yavapai'}, {'id': 14791, 'synset': 'yokuts.n.02', 'name': 'Yokuts'}, {'id': 14792, 'synset': 'yuma.n.01', 'name': 'Yuma'}, {'id': 14793, 'synset': 'gadaba.n.01', 'name': 'Gadaba'}, {'id': 14794, 'synset': 'kolam.n.01', 'name': 'Kolam'}, {'id': 14795, 'synset': 'kui.n.01', 'name': 'Kui'}, {'id': 14796, 'synset': 'toda.n.01', 'name': 'Toda'}, {'id': 14797, 'synset': 'tulu.n.01', 'name': 'Tulu'}, {'id': 14798, 'synset': 'gujarati.n.01', 'name': 'Gujarati'}, {'id': 14799, 'synset': 'kashmiri.n.01', 'name': 'Kashmiri'}, {'id': 14800, 'synset': 'punjabi.n.01', 'name': 'Punjabi'}, {'id': 14801, 'synset': 'slav.n.01', 'name': 'Slav'}, {'id': 14802, 'synset': 'anabaptist.n.01', 'name': 'Anabaptist'}, {'id': 14803, 'synset': 'adventist.n.01', 'name': 'Adventist'}, {'id': 14804, 'synset': 'gentile.n.03', 'name': 'gentile'}, {'id': 14805, 'synset': 'gentile.n.02', 'name': 'gentile'}, {'id': 14806, 'synset': 'catholic.n.01', 'name': 
'Catholic'}, {'id': 14807, 'synset': 'old_catholic.n.01', 'name': 'Old_Catholic'}, {'id': 14808, 'synset': 'uniat.n.01', 'name': 'Uniat'}, {'id': 14809, 'synset': 'copt.n.02', 'name': 'Copt'}, {'id': 14810, 'synset': 'jewess.n.01', 'name': 'Jewess'}, {'id': 14811, 'synset': 'jihadist.n.01', 'name': 'Jihadist'}, {'id': 14812, 'synset': 'buddhist.n.01', 'name': 'Buddhist'}, {'id': 14813, 'synset': 'zen_buddhist.n.01', 'name': 'Zen_Buddhist'}, {'id': 14814, 'synset': 'mahayanist.n.01', 'name': 'Mahayanist'}, {'id': 14815, 'synset': 'swami.n.01', 'name': 'swami'}, {'id': 14816, 'synset': 'hare_krishna.n.01', 'name': 'Hare_Krishna'}, {'id': 14817, 'synset': 'shintoist.n.01', 'name': 'Shintoist'}, {'id': 14818, 'synset': 'eurafrican.n.01', 'name': 'Eurafrican'}, {'id': 14819, 'synset': 'eurasian.n.01', 'name': 'Eurasian'}, {'id': 14820, 'synset': 'gael.n.01', 'name': 'Gael'}, {'id': 14821, 'synset': 'frank.n.01', 'name': 'Frank'}, {'id': 14822, 'synset': 'afghan.n.02', 'name': 'Afghan'}, {'id': 14823, 'synset': 'albanian.n.01', 'name': 'Albanian'}, {'id': 14824, 'synset': 'algerian.n.01', 'name': 'Algerian'}, {'id': 14825, 'synset': 'altaic.n.01', 'name': 'Altaic'}, {'id': 14826, 'synset': 'andorran.n.01', 'name': 'Andorran'}, {'id': 14827, 'synset': 'angolan.n.01', 'name': 'Angolan'}, {'id': 14828, 'synset': 'anguillan.n.01', 'name': 'Anguillan'}, {'id': 14829, 'synset': 'austrian.n.01', 'name': 'Austrian'}, {'id': 14830, 'synset': 'bahamian.n.01', 'name': 'Bahamian'}, {'id': 14831, 'synset': 'bahraini.n.01', 'name': 'Bahraini'}, {'id': 14832, 'synset': 'basotho.n.01', 'name': 'Basotho'}, {'id': 14833, 'synset': 'herero.n.01', 'name': 'Herero'}, {'id': 14834, 'synset': 'luba.n.01', 'name': 'Luba'}, {'id': 14835, 'synset': 'barbadian.n.01', 'name': 'Barbadian'}, {'id': 14836, 'synset': 'bolivian.n.01', 'name': 'Bolivian'}, {'id': 14837, 'synset': 'bornean.n.01', 'name': 'Bornean'}, {'id': 14838, 'synset': 'carioca.n.01', 'name': 'Carioca'}, {'id': 14839, 'synset': 'tupi.n.01', 'name': 'Tupi'}, {'id': 14840, 'synset': 'bruneian.n.01', 'name': 'Bruneian'}, {'id': 14841, 'synset': 'bulgarian.n.01', 'name': 'Bulgarian'}, {'id': 14842, 'synset': 'byelorussian.n.01', 'name': 'Byelorussian'}, {'id': 14843, 'synset': 'cameroonian.n.01', 'name': 'Cameroonian'}, {'id': 14844, 'synset': 'canadian.n.01', 'name': 'Canadian'}, {'id': 14845, 'synset': 'french_canadian.n.01', 'name': 'French_Canadian'}, {'id': 14846, 'synset': 'central_american.n.01', 'name': 'Central_American'}, {'id': 14847, 'synset': 'chilean.n.01', 'name': 'Chilean'}, {'id': 14848, 'synset': 'congolese.n.01', 'name': 'Congolese'}, {'id': 14849, 'synset': 'cypriot.n.01', 'name': 'Cypriot'}, {'id': 14850, 'synset': 'dane.n.01', 'name': 'Dane'}, {'id': 14851, 'synset': 'djiboutian.n.01', 'name': 'Djiboutian'}, {'id': 14852, 'synset': 'britisher.n.01', 'name': 'Britisher'}, {'id': 14853, 'synset': 'english_person.n.01', 'name': 'English_person'}, {'id': 14854, 'synset': 'englishwoman.n.01', 'name': 'Englishwoman'}, {'id': 14855, 'synset': 'anglo-saxon.n.02', 'name': 'Anglo-Saxon'}, {'id': 14856, 'synset': 'angle.n.03', 'name': 'Angle'}, {'id': 14857, 'synset': 'west_saxon.n.01', 'name': 'West_Saxon'}, {'id': 14858, 'synset': 'lombard.n.01', 'name': 'Lombard'}, {'id': 14859, 'synset': 'limey.n.01', 'name': 'limey'}, {'id': 14860, 'synset': 'cantabrigian.n.01', 'name': 'Cantabrigian'}, {'id': 14861, 'synset': 'cornishman.n.01', 'name': 'Cornishman'}, {'id': 14862, 'synset': 'cornishwoman.n.01', 'name': 'Cornishwoman'}, {'id': 14863, 'synset': 
'lancastrian.n.02', 'name': 'Lancastrian'}, {'id': 14864, 'synset': 'lancastrian.n.01', 'name': 'Lancastrian'}, {'id': 14865, 'synset': 'geordie.n.01', 'name': 'Geordie'}, {'id': 14866, 'synset': 'oxonian.n.01', 'name': 'Oxonian'}, {'id': 14867, 'synset': 'ethiopian.n.01', 'name': 'Ethiopian'}, {'id': 14868, 'synset': 'amhara.n.01', 'name': 'Amhara'}, {'id': 14869, 'synset': 'eritrean.n.01', 'name': 'Eritrean'}, {'id': 14870, 'synset': 'finn.n.01', 'name': 'Finn'}, {'id': 14871, 'synset': 'komi.n.01', 'name': 'Komi'}, {'id': 14872, 'synset': 'livonian.n.01', 'name': 'Livonian'}, {'id': 14873, 'synset': 'lithuanian.n.01', 'name': 'Lithuanian'}, {'id': 14874, 'synset': 'selkup.n.01', 'name': 'Selkup'}, {'id': 14875, 'synset': 'parisian.n.01', 'name': 'Parisian'}, {'id': 14876, 'synset': 'parisienne.n.01', 'name': 'Parisienne'}, {'id': 14877, 'synset': 'creole.n.02', 'name': 'Creole'}, {'id': 14878, 'synset': 'creole.n.01', 'name': 'Creole'}, {'id': 14879, 'synset': 'gabonese.n.01', 'name': 'Gabonese'}, {'id': 14880, 'synset': 'greek.n.02', 'name': 'Greek'}, {'id': 14881, 'synset': 'dorian.n.01', 'name': 'Dorian'}, {'id': 14882, 'synset': 'athenian.n.01', 'name': 'Athenian'}, {'id': 14883, 'synset': 'laconian.n.01', 'name': 'Laconian'}, {'id': 14884, 'synset': 'guyanese.n.01', 'name': 'Guyanese'}, {'id': 14885, 'synset': 'haitian.n.01', 'name': 'Haitian'}, {'id': 14886, 'synset': 'malay.n.01', 'name': 'Malay'}, {'id': 14887, 'synset': 'moro.n.01', 'name': 'Moro'}, {'id': 14888, 'synset': 'netherlander.n.01', 'name': 'Netherlander'}, {'id': 14889, 'synset': 'icelander.n.01', 'name': 'Icelander'}, {'id': 14890, 'synset': 'iraqi.n.01', 'name': 'Iraqi'}, {'id': 14891, 'synset': 'irishman.n.01', 'name': 'Irishman'}, {'id': 14892, 'synset': 'irishwoman.n.01', 'name': 'Irishwoman'}, {'id': 14893, 'synset': 'dubliner.n.01', 'name': 'Dubliner'}, {'id': 14894, 'synset': 'italian.n.01', 'name': 'Italian'}, {'id': 14895, 'synset': 'roman.n.01', 'name': 'Roman'}, {'id': 14896, 'synset': 'sabine.n.02', 'name': 'Sabine'}, {'id': 14897, 'synset': 'japanese.n.01', 'name': 'Japanese'}, {'id': 14898, 'synset': 'jordanian.n.01', 'name': 'Jordanian'}, {'id': 14899, 'synset': 'korean.n.01', 'name': 'Korean'}, {'id': 14900, 'synset': 'kenyan.n.01', 'name': 'Kenyan'}, {'id': 14901, 'synset': 'lao.n.01', 'name': 'Lao'}, {'id': 14902, 'synset': 'lapp.n.01', 'name': 'Lapp'}, {'id': 14903, 'synset': 'latin_american.n.01', 'name': 'Latin_American'}, {'id': 14904, 'synset': 'lebanese.n.01', 'name': 'Lebanese'}, {'id': 14905, 'synset': 'levantine.n.01', 'name': 'Levantine'}, {'id': 14906, 'synset': 'liberian.n.01', 'name': 'Liberian'}, {'id': 14907, 'synset': 'luxemburger.n.01', 'name': 'Luxemburger'}, {'id': 14908, 'synset': 'macedonian.n.01', 'name': 'Macedonian'}, {'id': 14909, 'synset': 'sabahan.n.01', 'name': 'Sabahan'}, {'id': 14910, 'synset': 'mexican.n.01', 'name': 'Mexican'}, {'id': 14911, 'synset': 'chicano.n.01', 'name': 'Chicano'}, {'id': 14912, 'synset': 'mexican-american.n.01', 'name': 'Mexican-American'}, {'id': 14913, 'synset': 'namibian.n.01', 'name': 'Namibian'}, {'id': 14914, 'synset': 'nauruan.n.01', 'name': 'Nauruan'}, {'id': 14915, 'synset': 'gurkha.n.02', 'name': 'Gurkha'}, {'id': 14916, 'synset': 'new_zealander.n.01', 'name': 'New_Zealander'}, {'id': 14917, 'synset': 'nicaraguan.n.01', 'name': 'Nicaraguan'}, {'id': 14918, 'synset': 'nigerian.n.01', 'name': 'Nigerian'}, {'id': 14919, 'synset': 'hausa.n.01', 'name': 'Hausa'}, {'id': 14920, 'synset': 'north_american.n.01', 'name': 'North_American'}, 
{'id': 14921, 'synset': 'nova_scotian.n.01', 'name': 'Nova_Scotian'}, {'id': 14922, 'synset': 'omani.n.01', 'name': 'Omani'}, {'id': 14923, 'synset': 'pakistani.n.01', 'name': 'Pakistani'}, {'id': 14924, 'synset': 'brahui.n.01', 'name': 'Brahui'}, {'id': 14925, 'synset': 'south_american_indian.n.01', 'name': 'South_American_Indian'}, {'id': 14926, 'synset': 'carib.n.01', 'name': 'Carib'}, {'id': 14927, 'synset': 'filipino.n.01', 'name': 'Filipino'}, {'id': 14928, 'synset': 'polynesian.n.01', 'name': 'Polynesian'}, {'id': 14929, 'synset': 'qatari.n.01', 'name': 'Qatari'}, {'id': 14930, 'synset': 'romanian.n.01', 'name': 'Romanian'}, {'id': 14931, 'synset': 'muscovite.n.02', 'name': 'Muscovite'}, {'id': 14932, 'synset': 'georgian.n.02', 'name': 'Georgian'}, {'id': 14933, 'synset': 'sarawakian.n.01', 'name': 'Sarawakian'}, {'id': 14934, 'synset': 'scandinavian.n.01', 'name': 'Scandinavian'}, {'id': 14935, 'synset': 'senegalese.n.01', 'name': 'Senegalese'}, {'id': 14936, 'synset': 'slovene.n.01', 'name': 'Slovene'}, {'id': 14937, 'synset': 'south_african.n.01', 'name': 'South_African'}, {'id': 14938, 'synset': 'south_american.n.01', 'name': 'South_American'}, {'id': 14939, 'synset': 'sudanese.n.01', 'name': 'Sudanese'}, {'id': 14940, 'synset': 'syrian.n.01', 'name': 'Syrian'}, {'id': 14941, 'synset': 'tahitian.n.01', 'name': 'Tahitian'}, {'id': 14942, 'synset': 'tanzanian.n.01', 'name': 'Tanzanian'}, {'id': 14943, 'synset': 'tibetan.n.02', 'name': 'Tibetan'}, {'id': 14944, 'synset': 'togolese.n.01', 'name': 'Togolese'}, {'id': 14945, 'synset': 'tuareg.n.01', 'name': 'Tuareg'}, {'id': 14946, 'synset': 'turki.n.01', 'name': 'Turki'}, {'id': 14947, 'synset': 'chuvash.n.01', 'name': 'Chuvash'}, {'id': 14948, 'synset': 'turkoman.n.01', 'name': 'Turkoman'}, {'id': 14949, 'synset': 'uzbek.n.01', 'name': 'Uzbek'}, {'id': 14950, 'synset': 'ugandan.n.01', 'name': 'Ugandan'}, {'id': 14951, 'synset': 'ukranian.n.01', 'name': 'Ukranian'}, {'id': 14952, 'synset': 'yakut.n.01', 'name': 'Yakut'}, {'id': 14953, 'synset': 'tungus.n.01', 'name': 'Tungus'}, {'id': 14954, 'synset': 'igbo.n.01', 'name': 'Igbo'}, {'id': 14955, 'synset': 'american.n.03', 'name': 'American'}, {'id': 14956, 'synset': 'anglo-american.n.01', 'name': 'Anglo-American'}, {'id': 14957, 'synset': 'alaska_native.n.01', 'name': 'Alaska_Native'}, {'id': 14958, 'synset': 'arkansan.n.01', 'name': 'Arkansan'}, {'id': 14959, 'synset': 'carolinian.n.01', 'name': 'Carolinian'}, {'id': 14960, 'synset': 'coloradan.n.01', 'name': 'Coloradan'}, {'id': 14961, 'synset': 'connecticuter.n.01', 'name': 'Connecticuter'}, {'id': 14962, 'synset': 'delawarean.n.01', 'name': 'Delawarean'}, {'id': 14963, 'synset': 'floridian.n.01', 'name': 'Floridian'}, {'id': 14964, 'synset': 'german_american.n.01', 'name': 'German_American'}, {'id': 14965, 'synset': 'illinoisan.n.01', 'name': 'Illinoisan'}, {'id': 14966, 'synset': 'mainer.n.01', 'name': 'Mainer'}, {'id': 14967, 'synset': 'marylander.n.01', 'name': 'Marylander'}, {'id': 14968, 'synset': 'minnesotan.n.01', 'name': 'Minnesotan'}, {'id': 14969, 'synset': 'nebraskan.n.01', 'name': 'Nebraskan'}, {'id': 14970, 'synset': 'new_hampshirite.n.01', 'name': 'New_Hampshirite'}, {'id': 14971, 'synset': 'new_jerseyan.n.01', 'name': 'New_Jerseyan'}, {'id': 14972, 'synset': 'new_yorker.n.01', 'name': 'New_Yorker'}, {'id': 14973, 'synset': 'north_carolinian.n.01', 'name': 'North_Carolinian'}, {'id': 14974, 'synset': 'oregonian.n.01', 'name': 'Oregonian'}, {'id': 14975, 'synset': 'pennsylvanian.n.02', 'name': 'Pennsylvanian'}, {'id': 
14976, 'synset': 'texan.n.01', 'name': 'Texan'}, {'id': 14977, 'synset': 'utahan.n.01', 'name': 'Utahan'}, {'id': 14978, 'synset': 'uruguayan.n.01', 'name': 'Uruguayan'}, {'id': 14979, 'synset': 'vietnamese.n.01', 'name': 'Vietnamese'}, {'id': 14980, 'synset': 'gambian.n.01', 'name': 'Gambian'}, {'id': 14981, 'synset': 'east_german.n.01', 'name': 'East_German'}, {'id': 14982, 'synset': 'berliner.n.01', 'name': 'Berliner'}, {'id': 14983, 'synset': 'prussian.n.01', 'name': 'Prussian'}, {'id': 14984, 'synset': 'ghanian.n.01', 'name': 'Ghanian'}, {'id': 14985, 'synset': 'guinean.n.01', 'name': 'Guinean'}, {'id': 14986, 'synset': 'papuan.n.01', 'name': 'Papuan'}, {'id': 14987, 'synset': 'walloon.n.01', 'name': 'Walloon'}, {'id': 14988, 'synset': 'yemeni.n.01', 'name': 'Yemeni'}, {'id': 14989, 'synset': 'yugoslav.n.01', 'name': 'Yugoslav'}, {'id': 14990, 'synset': 'serbian.n.01', 'name': 'Serbian'}, {'id': 14991, 'synset': 'xhosa.n.01', 'name': 'Xhosa'}, {'id': 14992, 'synset': 'zairese.n.01', 'name': 'Zairese'}, {'id': 14993, 'synset': 'zimbabwean.n.01', 'name': 'Zimbabwean'}, {'id': 14994, 'synset': 'zulu.n.01', 'name': 'Zulu'}, {'id': 14995, 'synset': 'gemini.n.01', 'name': 'Gemini'}, {'id': 14996, 'synset': 'sagittarius.n.01', 'name': 'Sagittarius'}, {'id': 14997, 'synset': 'pisces.n.02', 'name': 'Pisces'}, {'id': 14998, 'synset': 'abbe.n.01', 'name': 'abbe'}, {'id': 14999, 'synset': 'abbess.n.01', 'name': 'abbess'}, {'id': 15000, 'synset': 'abnegator.n.01', 'name': 'abnegator'}, {'id': 15001, 'synset': 'abridger.n.01', 'name': 'abridger'}, {'id': 15002, 'synset': 'abstractor.n.01', 'name': 'abstractor'}, {'id': 15003, 'synset': 'absconder.n.01', 'name': 'absconder'}, {'id': 15004, 'synset': 'absolver.n.01', 'name': 'absolver'}, {'id': 15005, 'synset': 'abecedarian.n.01', 'name': 'abecedarian'}, {'id': 15006, 'synset': 'aberrant.n.01', 'name': 'aberrant'}, {'id': 15007, 'synset': 'abettor.n.01', 'name': 'abettor'}, {'id': 15008, 'synset': 'abhorrer.n.01', 'name': 'abhorrer'}, {'id': 15009, 'synset': 'abomination.n.01', 'name': 'abomination'}, {'id': 15010, 'synset': 'abseiler.n.01', 'name': 'abseiler'}, {'id': 15011, 'synset': 'abstainer.n.01', 'name': 'abstainer'}, {'id': 15012, 'synset': 'academic_administrator.n.01', 'name': 'academic_administrator'}, {'id': 15013, 'synset': 'academician.n.01', 'name': 'academician'}, {'id': 15014, 'synset': 'accessory_before_the_fact.n.01', 'name': 'accessory_before_the_fact'}, {'id': 15015, 'synset': 'companion.n.03', 'name': 'companion'}, {'id': 15016, 'synset': 'accompanist.n.01', 'name': 'accompanist'}, {'id': 15017, 'synset': 'accomplice.n.01', 'name': 'accomplice'}, {'id': 15018, 'synset': 'account_executive.n.01', 'name': 'account_executive'}, {'id': 15019, 'synset': 'accused.n.01', 'name': 'accused'}, {'id': 15020, 'synset': 'accuser.n.01', 'name': 'accuser'}, {'id': 15021, 'synset': 'acid_head.n.01', 'name': 'acid_head'}, {'id': 15022, 'synset': 'acquaintance.n.03', 'name': 'acquaintance'}, {'id': 15023, 'synset': 'acquirer.n.01', 'name': 'acquirer'}, {'id': 15024, 'synset': 'aerialist.n.01', 'name': 'aerialist'}, {'id': 15025, 'synset': 'action_officer.n.01', 'name': 'action_officer'}, {'id': 15026, 'synset': 'active.n.03', 'name': 'active'}, {'id': 15027, 'synset': 'active_citizen.n.01', 'name': 'active_citizen'}, {'id': 15028, 'synset': 'actor.n.01', 'name': 'actor'}, {'id': 15029, 'synset': 'actor.n.02', 'name': 'actor'}, {'id': 15030, 'synset': 'addict.n.01', 'name': 'addict'}, {'id': 15031, 'synset': 'adducer.n.01', 'name': 'adducer'}, 
{'id': 15032, 'synset': 'adjuster.n.01', 'name': 'adjuster'}, {'id': 15033, 'synset': 'adjutant.n.01', 'name': 'adjutant'}, {'id': 15034, 'synset': 'adjutant_general.n.01', 'name': 'adjutant_general'}, {'id': 15035, 'synset': 'admirer.n.03', 'name': 'admirer'}, {'id': 15036, 'synset': 'adoptee.n.01', 'name': 'adoptee'}, {'id': 15037, 'synset': 'adulterer.n.01', 'name': 'adulterer'}, {'id': 15038, 'synset': 'adulteress.n.01', 'name': 'adulteress'}, {'id': 15039, 'synset': 'advertiser.n.01', 'name': 'advertiser'}, {'id': 15040, 'synset': 'advisee.n.01', 'name': 'advisee'}, {'id': 15041, 'synset': 'advocate.n.01', 'name': 'advocate'}, {'id': 15042, 'synset': 'aeronautical_engineer.n.01', 'name': 'aeronautical_engineer'}, {'id': 15043, 'synset': 'affiliate.n.01', 'name': 'affiliate'}, {'id': 15044, 'synset': 'affluent.n.01', 'name': 'affluent'}, {'id': 15045, 'synset': 'aficionado.n.02', 'name': 'aficionado'}, {'id': 15046, 'synset': 'buck_sergeant.n.01', 'name': 'buck_sergeant'}, {'id': 15047, 'synset': 'agent-in-place.n.01', 'name': 'agent-in-place'}, {'id': 15048, 'synset': 'aggravator.n.01', 'name': 'aggravator'}, {'id': 15049, 'synset': 'agitator.n.01', 'name': 'agitator'}, {'id': 15050, 'synset': 'agnostic.n.02', 'name': 'agnostic'}, {'id': 15051, 'synset': 'agnostic.n.01', 'name': 'agnostic'}, {'id': 15052, 'synset': 'agonist.n.02', 'name': 'agonist'}, {'id': 15053, 'synset': 'agony_aunt.n.01', 'name': 'agony_aunt'}, {'id': 15054, 'synset': 'agriculturist.n.01', 'name': 'agriculturist'}, {'id': 15055, 'synset': 'air_attache.n.01', 'name': 'air_attache'}, {'id': 15056, 'synset': 'air_force_officer.n.01', 'name': 'air_force_officer'}, {'id': 15057, 'synset': 'airhead.n.01', 'name': 'airhead'}, {'id': 15058, 'synset': 'air_traveler.n.01', 'name': 'air_traveler'}, {'id': 15059, 'synset': 'alarmist.n.01', 'name': 'alarmist'}, {'id': 15060, 'synset': 'albino.n.01', 'name': 'albino'}, {'id': 15061, 'synset': 'alcoholic.n.01', 'name': 'alcoholic'}, {'id': 15062, 'synset': 'alderman.n.01', 'name': 'alderman'}, {'id': 15063, 'synset': 'alexic.n.01', 'name': 'alexic'}, {'id': 15064, 'synset': 'alienee.n.01', 'name': 'alienee'}, {'id': 15065, 'synset': 'alienor.n.01', 'name': 'alienor'}, {'id': 15066, 'synset': 'aliterate.n.01', 'name': 'aliterate'}, {'id': 15067, 'synset': 'algebraist.n.01', 'name': 'algebraist'}, {'id': 15068, 'synset': 'allegorizer.n.01', 'name': 'allegorizer'}, {'id': 15069, 'synset': 'alliterator.n.01', 'name': 'alliterator'}, {'id': 15070, 'synset': 'almoner.n.01', 'name': 'almoner'}, {'id': 15071, 'synset': 'alpinist.n.01', 'name': 'alpinist'}, {'id': 15072, 'synset': 'altar_boy.n.01', 'name': 'altar_boy'}, {'id': 15073, 'synset': 'alto.n.01', 'name': 'alto'}, {'id': 15074, 'synset': 'ambassador.n.01', 'name': 'ambassador'}, {'id': 15075, 'synset': 'ambassador.n.02', 'name': 'ambassador'}, {'id': 15076, 'synset': 'ambusher.n.01', 'name': 'ambusher'}, {'id': 15077, 'synset': 'amicus_curiae.n.01', 'name': 'amicus_curiae'}, {'id': 15078, 'synset': 'amoralist.n.01', 'name': 'amoralist'}, {'id': 15079, 'synset': 'amputee.n.01', 'name': 'amputee'}, {'id': 15080, 'synset': 'analogist.n.01', 'name': 'analogist'}, {'id': 15081, 'synset': 'analphabet.n.01', 'name': 'analphabet'}, {'id': 15082, 'synset': 'analyst.n.01', 'name': 'analyst'}, {'id': 15083, 'synset': 'industry_analyst.n.01', 'name': 'industry_analyst'}, {'id': 15084, 'synset': 'market_strategist.n.01', 'name': 'market_strategist'}, {'id': 15085, 'synset': 'anarchist.n.01', 'name': 'anarchist'}, {'id': 15086, 'synset': 
'anathema.n.01', 'name': 'anathema'}, {'id': 15087, 'synset': 'ancestor.n.01', 'name': 'ancestor'}, {'id': 15088, 'synset': 'anchor.n.03', 'name': 'anchor'}, {'id': 15089, 'synset': 'ancient.n.02', 'name': 'ancient'}, {'id': 15090, 'synset': 'anecdotist.n.01', 'name': 'anecdotist'}, {'id': 15091, 'synset': 'angler.n.02', 'name': 'angler'}, {'id': 15092, 'synset': 'animator.n.02', 'name': 'animator'}, {'id': 15093, 'synset': 'animist.n.01', 'name': 'animist'}, {'id': 15094, 'synset': 'annotator.n.01', 'name': 'annotator'}, {'id': 15095, 'synset': 'announcer.n.02', 'name': 'announcer'}, {'id': 15096, 'synset': 'announcer.n.01', 'name': 'announcer'}, {'id': 15097, 'synset': 'anti.n.01', 'name': 'anti'}, {'id': 15098, 'synset': 'anti-american.n.01', 'name': 'anti-American'}, {'id': 15099, 'synset': 'anti-semite.n.01', 'name': 'anti-Semite'}, {'id': 15100, 'synset': 'anzac.n.01', 'name': 'Anzac'}, {'id': 15101, 'synset': 'ape-man.n.02', 'name': 'ape-man'}, {'id': 15102, 'synset': 'aphakic.n.01', 'name': 'aphakic'}, {'id': 15103, 'synset': 'appellant.n.01', 'name': 'appellant'}, {'id': 15104, 'synset': 'appointee.n.01', 'name': 'appointee'}, {'id': 15105, 'synset': 'apprehender.n.02', 'name': 'apprehender'}, {'id': 15106, 'synset': 'april_fool.n.01', 'name': 'April_fool'}, {'id': 15107, 'synset': 'aspirant.n.01', 'name': 'aspirant'}, {'id': 15108, 'synset': 'appreciator.n.01', 'name': 'appreciator'}, {'id': 15109, 'synset': 'appropriator.n.01', 'name': 'appropriator'}, {'id': 15110, 'synset': 'arabist.n.01', 'name': 'Arabist'}, {'id': 15111, 'synset': 'archaist.n.01', 'name': 'archaist'}, {'id': 15112, 'synset': 'archbishop.n.01', 'name': 'archbishop'}, {'id': 15113, 'synset': 'archer.n.01', 'name': 'archer'}, {'id': 15114, 'synset': 'architect.n.01', 'name': 'architect'}, {'id': 15115, 'synset': 'archivist.n.01', 'name': 'archivist'}, {'id': 15116, 'synset': 'archpriest.n.01', 'name': 'archpriest'}, {'id': 15117, 'synset': 'aristotelian.n.01', 'name': 'Aristotelian'}, {'id': 15118, 'synset': 'armiger.n.02', 'name': 'armiger'}, {'id': 15119, 'synset': 'army_attache.n.01', 'name': 'army_attache'}, {'id': 15120, 'synset': 'army_engineer.n.01', 'name': 'army_engineer'}, {'id': 15121, 'synset': 'army_officer.n.01', 'name': 'army_officer'}, {'id': 15122, 'synset': 'arranger.n.02', 'name': 'arranger'}, {'id': 15123, 'synset': 'arrival.n.03', 'name': 'arrival'}, {'id': 15124, 'synset': 'arthritic.n.01', 'name': 'arthritic'}, {'id': 15125, 'synset': 'articulator.n.01', 'name': 'articulator'}, {'id': 15126, 'synset': 'artilleryman.n.01', 'name': 'artilleryman'}, {'id': 15127, 'synset': "artist's_model.n.01", 'name': "artist's_model"}, {'id': 15128, 'synset': 'assayer.n.01', 'name': 'assayer'}, {'id': 15129, 'synset': 'assemblyman.n.01', 'name': 'assemblyman'}, {'id': 15130, 'synset': 'assemblywoman.n.01', 'name': 'assemblywoman'}, {'id': 15131, 'synset': 'assenter.n.01', 'name': 'assenter'}, {'id': 15132, 'synset': 'asserter.n.01', 'name': 'asserter'}, {'id': 15133, 'synset': 'assignee.n.01', 'name': 'assignee'}, {'id': 15134, 'synset': 'assistant.n.01', 'name': 'assistant'}, {'id': 15135, 'synset': 'assistant_professor.n.01', 'name': 'assistant_professor'}, {'id': 15136, 'synset': 'associate.n.01', 'name': 'associate'}, {'id': 15137, 'synset': 'associate.n.03', 'name': 'associate'}, {'id': 15138, 'synset': 'associate_professor.n.01', 'name': 'associate_professor'}, {'id': 15139, 'synset': 'astronaut.n.01', 'name': 'astronaut'}, {'id': 15140, 'synset': 'cosmographer.n.01', 'name': 'cosmographer'}, {'id': 
15141, 'synset': 'atheist.n.01', 'name': 'atheist'}, {'id': 15142, 'synset': 'athlete.n.01', 'name': 'athlete'}, {'id': 15143, 'synset': 'attendant.n.01', 'name': 'attendant'}, {'id': 15144, 'synset': 'attorney_general.n.01', 'name': 'attorney_general'}, {'id': 15145, 'synset': 'auditor.n.02', 'name': 'auditor'}, {'id': 15146, 'synset': 'augur.n.01', 'name': 'augur'}, {'id': 15147, 'synset': 'aunt.n.01', 'name': 'aunt'}, {'id': 15148, 'synset': 'au_pair_girl.n.01', 'name': 'au_pair_girl'}, {'id': 15149, 'synset': 'authoritarian.n.01', 'name': 'authoritarian'}, {'id': 15150, 'synset': 'authority.n.02', 'name': 'authority'}, {'id': 15151, 'synset': 'authorizer.n.01', 'name': 'authorizer'}, {'id': 15152, 'synset': 'automobile_mechanic.n.01', 'name': 'automobile_mechanic'}, {'id': 15153, 'synset': 'aviator.n.01', 'name': 'aviator'}, {'id': 15154, 'synset': 'aviatrix.n.01', 'name': 'aviatrix'}, {'id': 15155, 'synset': 'ayah.n.01', 'name': 'ayah'}, {'id': 15156, 'synset': 'babu.n.01', 'name': 'babu'}, {'id': 15157, 'synset': 'baby.n.05', 'name': 'baby'}, {'id': 15158, 'synset': 'baby.n.04', 'name': 'baby'}, {'id': 15159, 'synset': 'baby_boomer.n.01', 'name': 'baby_boomer'}, {'id': 15160, 'synset': 'baby_farmer.n.01', 'name': 'baby_farmer'}, {'id': 15161, 'synset': 'back.n.04', 'name': 'back'}, {'id': 15162, 'synset': 'backbencher.n.01', 'name': 'backbencher'}, {'id': 15163, 'synset': 'backpacker.n.01', 'name': 'backpacker'}, {'id': 15164, 'synset': 'backroom_boy.n.01', 'name': 'backroom_boy'}, {'id': 15165, 'synset': 'backscratcher.n.01', 'name': 'backscratcher'}, {'id': 15166, 'synset': 'bad_person.n.01', 'name': 'bad_person'}, {'id': 15167, 'synset': 'baggage.n.02', 'name': 'baggage'}, {'id': 15168, 'synset': 'bag_lady.n.01', 'name': 'bag_lady'}, {'id': 15169, 'synset': 'bailee.n.01', 'name': 'bailee'}, {'id': 15170, 'synset': 'bailiff.n.01', 'name': 'bailiff'}, {'id': 15171, 'synset': 'bailor.n.01', 'name': 'bailor'}, {'id': 15172, 'synset': 'bairn.n.01', 'name': 'bairn'}, {'id': 15173, 'synset': 'baker.n.02', 'name': 'baker'}, {'id': 15174, 'synset': 'balancer.n.01', 'name': 'balancer'}, {'id': 15175, 'synset': 'balker.n.01', 'name': 'balker'}, {'id': 15176, 'synset': 'ball-buster.n.01', 'name': 'ball-buster'}, {'id': 15177, 'synset': 'ball_carrier.n.01', 'name': 'ball_carrier'}, {'id': 15178, 'synset': 'ballet_dancer.n.01', 'name': 'ballet_dancer'}, {'id': 15179, 'synset': 'ballet_master.n.01', 'name': 'ballet_master'}, {'id': 15180, 'synset': 'ballet_mistress.n.01', 'name': 'ballet_mistress'}, {'id': 15181, 'synset': 'balletomane.n.01', 'name': 'balletomane'}, {'id': 15182, 'synset': 'ball_hawk.n.01', 'name': 'ball_hawk'}, {'id': 15183, 'synset': 'balloonist.n.01', 'name': 'balloonist'}, {'id': 15184, 'synset': 'ballplayer.n.01', 'name': 'ballplayer'}, {'id': 15185, 'synset': 'bullfighter.n.01', 'name': 'bullfighter'}, {'id': 15186, 'synset': 'banderillero.n.01', 'name': 'banderillero'}, {'id': 15187, 'synset': 'matador.n.01', 'name': 'matador'}, {'id': 15188, 'synset': 'picador.n.01', 'name': 'picador'}, {'id': 15189, 'synset': 'bandsman.n.01', 'name': 'bandsman'}, {'id': 15190, 'synset': 'banker.n.02', 'name': 'banker'}, {'id': 15191, 'synset': 'bank_robber.n.01', 'name': 'bank_robber'}, {'id': 15192, 'synset': 'bankrupt.n.01', 'name': 'bankrupt'}, {'id': 15193, 'synset': 'bantamweight.n.01', 'name': 'bantamweight'}, {'id': 15194, 'synset': 'barmaid.n.01', 'name': 'barmaid'}, {'id': 15195, 'synset': 'baron.n.03', 'name': 'baron'}, {'id': 15196, 'synset': 'baron.n.02', 'name': 'baron'}, 
{'id': 15197, 'synset': 'baron.n.01', 'name': 'baron'}, {'id': 15198, 'synset': 'bartender.n.01', 'name': 'bartender'}, {'id': 15199, 'synset': 'baseball_coach.n.01', 'name': 'baseball_coach'}, {'id': 15200, 'synset': 'base_runner.n.01', 'name': 'base_runner'}, {'id': 15201, 'synset': 'basketball_player.n.01', 'name': 'basketball_player'}, {'id': 15202, 'synset': 'basketweaver.n.01', 'name': 'basketweaver'}, {'id': 15203, 'synset': 'basket_maker.n.01', 'name': 'Basket_Maker'}, {'id': 15204, 'synset': 'bass.n.03', 'name': 'bass'}, {'id': 15205, 'synset': 'bastard.n.02', 'name': 'bastard'}, {'id': 15206, 'synset': 'bat_boy.n.01', 'name': 'bat_boy'}, {'id': 15207, 'synset': 'bather.n.02', 'name': 'bather'}, {'id': 15208, 'synset': 'batman.n.01', 'name': 'batman'}, {'id': 15209, 'synset': 'baton_twirler.n.01', 'name': 'baton_twirler'}, {'id': 15210, 'synset': 'bavarian.n.01', 'name': 'Bavarian'}, {'id': 15211, 'synset': 'beadsman.n.01', 'name': 'beadsman'}, {'id': 15212, 'synset': 'beard.n.03', 'name': 'beard'}, {'id': 15213, 'synset': 'beatnik.n.01', 'name': 'beatnik'}, {'id': 15214, 'synset': 'beauty_consultant.n.01', 'name': 'beauty_consultant'}, {'id': 15215, 'synset': 'bedouin.n.01', 'name': 'Bedouin'}, {'id': 15216, 'synset': 'bedwetter.n.01', 'name': 'bedwetter'}, {'id': 15217, 'synset': 'beekeeper.n.01', 'name': 'beekeeper'}, {'id': 15218, 'synset': 'beer_drinker.n.01', 'name': 'beer_drinker'}, {'id': 15219, 'synset': 'beggarman.n.01', 'name': 'beggarman'}, {'id': 15220, 'synset': 'beggarwoman.n.01', 'name': 'beggarwoman'}, {'id': 15221, 'synset': 'beldam.n.02', 'name': 'beldam'}, {'id': 15222, 'synset': 'theist.n.01', 'name': 'theist'}, {'id': 15223, 'synset': 'believer.n.01', 'name': 'believer'}, {'id': 15224, 'synset': 'bell_founder.n.01', 'name': 'bell_founder'}, {'id': 15225, 'synset': 'benedick.n.01', 'name': 'benedick'}, {'id': 15226, 'synset': 'berserker.n.01', 'name': 'berserker'}, {'id': 15227, 'synset': 'besieger.n.01', 'name': 'besieger'}, {'id': 15228, 'synset': 'best.n.02', 'name': 'best'}, {'id': 15229, 'synset': 'betrothed.n.01', 'name': 'betrothed'}, {'id': 15230, 'synset': 'big_brother.n.01', 'name': 'Big_Brother'}, {'id': 15231, 'synset': 'bigot.n.01', 'name': 'bigot'}, {'id': 15232, 'synset': 'big_shot.n.01', 'name': 'big_shot'}, {'id': 15233, 'synset': 'big_sister.n.01', 'name': 'big_sister'}, {'id': 15234, 'synset': 'billiard_player.n.01', 'name': 'billiard_player'}, {'id': 15235, 'synset': 'biochemist.n.01', 'name': 'biochemist'}, {'id': 15236, 'synset': 'biographer.n.01', 'name': 'biographer'}, {'id': 15237, 'synset': 'bird_fancier.n.01', 'name': 'bird_fancier'}, {'id': 15238, 'synset': 'birth.n.05', 'name': 'birth'}, {'id': 15239, 'synset': 'birth-control_campaigner.n.01', 'name': 'birth-control_campaigner'}, {'id': 15240, 'synset': 'bisexual.n.01', 'name': 'bisexual'}, {'id': 15241, 'synset': 'black_belt.n.01', 'name': 'black_belt'}, {'id': 15242, 'synset': 'blackmailer.n.01', 'name': 'blackmailer'}, {'id': 15243, 'synset': 'black_muslim.n.01', 'name': 'Black_Muslim'}, {'id': 15244, 'synset': 'blacksmith.n.01', 'name': 'blacksmith'}, {'id': 15245, 'synset': 'blade.n.02', 'name': 'blade'}, {'id': 15246, 'synset': 'blind_date.n.01', 'name': 'blind_date'}, {'id': 15247, 'synset': 'bluecoat.n.01', 'name': 'bluecoat'}, {'id': 15248, 'synset': 'bluestocking.n.01', 'name': 'bluestocking'}, {'id': 15249, 'synset': 'boatbuilder.n.01', 'name': 'boatbuilder'}, {'id': 15250, 'synset': 'boatman.n.01', 'name': 'boatman'}, {'id': 15251, 'synset': 'boatswain.n.01', 'name': 
'boatswain'}, {'id': 15252, 'synset': 'bobby.n.01', 'name': 'bobby'}, {'id': 15253, 'synset': 'bodyguard.n.01', 'name': 'bodyguard'}, {'id': 15254, 'synset': 'boffin.n.01', 'name': 'boffin'}, {'id': 15255, 'synset': 'bolshevik.n.01', 'name': 'Bolshevik'}, {'id': 15256, 'synset': 'bolshevik.n.02', 'name': 'Bolshevik'}, {'id': 15257, 'synset': 'bombshell.n.01', 'name': 'bombshell'}, {'id': 15258, 'synset': 'bondman.n.01', 'name': 'bondman'}, {'id': 15259, 'synset': 'bondwoman.n.02', 'name': 'bondwoman'}, {'id': 15260, 'synset': 'bondwoman.n.01', 'name': 'bondwoman'}, {'id': 15261, 'synset': 'bond_servant.n.01', 'name': 'bond_servant'}, {'id': 15262, 'synset': 'book_agent.n.01', 'name': 'book_agent'}, {'id': 15263, 'synset': 'bookbinder.n.01', 'name': 'bookbinder'}, {'id': 15264, 'synset': 'bookkeeper.n.01', 'name': 'bookkeeper'}, {'id': 15265, 'synset': 'bookmaker.n.01', 'name': 'bookmaker'}, {'id': 15266, 'synset': 'bookworm.n.02', 'name': 'bookworm'}, {'id': 15267, 'synset': 'booster.n.03', 'name': 'booster'}, {'id': 15268, 'synset': 'bootblack.n.01', 'name': 'bootblack'}, {'id': 15269, 'synset': 'bootlegger.n.01', 'name': 'bootlegger'}, {'id': 15270, 'synset': 'bootmaker.n.01', 'name': 'bootmaker'}, {'id': 15271, 'synset': 'borderer.n.01', 'name': 'borderer'}, {'id': 15272, 'synset': 'border_patrolman.n.01', 'name': 'border_patrolman'}, {'id': 15273, 'synset': 'botanist.n.01', 'name': 'botanist'}, {'id': 15274, 'synset': 'bottom_feeder.n.01', 'name': 'bottom_feeder'}, {'id': 15275, 'synset': 'boulevardier.n.01', 'name': 'boulevardier'}, {'id': 15276, 'synset': 'bounty_hunter.n.02', 'name': 'bounty_hunter'}, {'id': 15277, 'synset': 'bounty_hunter.n.01', 'name': 'bounty_hunter'}, {'id': 15278, 'synset': 'bourbon.n.03', 'name': 'Bourbon'}, {'id': 15279, 'synset': 'bowler.n.01', 'name': 'bowler'}, {'id': 15280, 'synset': 'slugger.n.02', 'name': 'slugger'}, {'id': 15281, 'synset': 'cub.n.02', 'name': 'cub'}, {'id': 15282, 'synset': 'boy_scout.n.01', 'name': 'Boy_Scout'}, {'id': 15283, 'synset': 'boy_scout.n.02', 'name': 'boy_scout'}, {'id': 15284, 'synset': 'boy_wonder.n.01', 'name': 'boy_wonder'}, {'id': 15285, 'synset': 'bragger.n.01', 'name': 'bragger'}, {'id': 15286, 'synset': 'brahman.n.02', 'name': 'brahman'}, {'id': 15287, 'synset': 'brawler.n.01', 'name': 'brawler'}, {'id': 15288, 'synset': 'breadwinner.n.01', 'name': 'breadwinner'}, {'id': 15289, 'synset': 'breaststroker.n.01', 'name': 'breaststroker'}, {'id': 15290, 'synset': 'breeder.n.01', 'name': 'breeder'}, {'id': 15291, 'synset': 'brick.n.02', 'name': 'brick'}, {'id': 15292, 'synset': 'bride.n.03', 'name': 'bride'}, {'id': 15293, 'synset': 'bridesmaid.n.01', 'name': 'bridesmaid'}, {'id': 15294, 'synset': 'bridge_agent.n.01', 'name': 'bridge_agent'}, {'id': 15295, 'synset': 'broadcast_journalist.n.01', 'name': 'broadcast_journalist'}, {'id': 15296, 'synset': 'brother.n.05', 'name': 'Brother'}, {'id': 15297, 'synset': 'brother-in-law.n.01', 'name': 'brother-in-law'}, {'id': 15298, 'synset': 'browser.n.01', 'name': 'browser'}, {'id': 15299, 'synset': 'brummie.n.01', 'name': 'Brummie'}, {'id': 15300, 'synset': 'buddy.n.01', 'name': 'buddy'}, {'id': 15301, 'synset': 'bull.n.06', 'name': 'bull'}, {'id': 15302, 'synset': 'bully.n.02', 'name': 'bully'}, {'id': 15303, 'synset': 'bunny.n.01', 'name': 'bunny'}, {'id': 15304, 'synset': 'burglar.n.01', 'name': 'burglar'}, {'id': 15305, 'synset': 'bursar.n.01', 'name': 'bursar'}, {'id': 15306, 'synset': 'busboy.n.01', 'name': 'busboy'}, {'id': 15307, 'synset': 'business_editor.n.01', 'name': 
'business_editor'}, {'id': 15308, 'synset': 'business_traveler.n.01', 'name': 'business_traveler'}, {'id': 15309, 'synset': 'buster.n.04', 'name': 'buster'}, {'id': 15310, 'synset': 'busybody.n.01', 'name': 'busybody'}, {'id': 15311, 'synset': 'buttinsky.n.01', 'name': 'buttinsky'}, {'id': 15312, 'synset': 'cabinetmaker.n.01', 'name': 'cabinetmaker'}, {'id': 15313, 'synset': 'caddie.n.01', 'name': 'caddie'}, {'id': 15314, 'synset': 'cadet.n.01', 'name': 'cadet'}, {'id': 15315, 'synset': 'caller.n.04', 'name': 'caller'}, {'id': 15316, 'synset': 'call_girl.n.01', 'name': 'call_girl'}, {'id': 15317, 'synset': 'calligrapher.n.01', 'name': 'calligrapher'}, {'id': 15318, 'synset': 'campaigner.n.01', 'name': 'campaigner'}, {'id': 15319, 'synset': 'camper.n.01', 'name': 'camper'}, {'id': 15320, 'synset': 'camp_follower.n.02', 'name': 'camp_follower'}, {'id': 15321, 'synset': 'candidate.n.02', 'name': 'candidate'}, {'id': 15322, 'synset': 'canonist.n.01', 'name': 'canonist'}, {'id': 15323, 'synset': 'capitalist.n.01', 'name': 'capitalist'}, {'id': 15324, 'synset': 'captain.n.07', 'name': 'captain'}, {'id': 15325, 'synset': 'captain.n.06', 'name': 'captain'}, {'id': 15326, 'synset': 'captain.n.01', 'name': 'captain'}, {'id': 15327, 'synset': 'captain.n.05', 'name': 'captain'}, {'id': 15328, 'synset': 'captive.n.02', 'name': 'captive'}, {'id': 15329, 'synset': 'captive.n.03', 'name': 'captive'}, {'id': 15330, 'synset': 'cardinal.n.01', 'name': 'cardinal'}, {'id': 15331, 'synset': 'cardiologist.n.01', 'name': 'cardiologist'}, {'id': 15332, 'synset': 'card_player.n.01', 'name': 'card_player'}, {'id': 15333, 'synset': 'cardsharp.n.01', 'name': 'cardsharp'}, {'id': 15334, 'synset': 'careerist.n.01', 'name': 'careerist'}, {'id': 15335, 'synset': 'career_man.n.01', 'name': 'career_man'}, {'id': 15336, 'synset': 'caregiver.n.02', 'name': 'caregiver'}, {'id': 15337, 'synset': 'caretaker.n.01', 'name': 'caretaker'}, {'id': 15338, 'synset': 'caretaker.n.02', 'name': 'caretaker'}, {'id': 15339, 'synset': 'caricaturist.n.01', 'name': 'caricaturist'}, {'id': 15340, 'synset': 'carillonneur.n.01', 'name': 'carillonneur'}, {'id': 15341, 'synset': 'caroler.n.01', 'name': 'caroler'}, {'id': 15342, 'synset': 'carpenter.n.01', 'name': 'carpenter'}, {'id': 15343, 'synset': 'carper.n.01', 'name': 'carper'}, {'id': 15344, 'synset': 'cartesian.n.01', 'name': 'Cartesian'}, {'id': 15345, 'synset': 'cashier.n.02', 'name': 'cashier'}, {'id': 15346, 'synset': 'casualty.n.02', 'name': 'casualty'}, {'id': 15347, 'synset': 'casualty.n.01', 'name': 'casualty'}, {'id': 15348, 'synset': 'casuist.n.01', 'name': 'casuist'}, {'id': 15349, 'synset': 'catechist.n.01', 'name': 'catechist'}, {'id': 15350, 'synset': 'catechumen.n.01', 'name': 'catechumen'}, {'id': 15351, 'synset': 'caterer.n.01', 'name': 'caterer'}, {'id': 15352, 'synset': 'catholicos.n.01', 'name': 'Catholicos'}, {'id': 15353, 'synset': 'cat_fancier.n.01', 'name': 'cat_fancier'}, {'id': 15354, 'synset': 'cavalier.n.02', 'name': 'Cavalier'}, {'id': 15355, 'synset': 'cavalryman.n.02', 'name': 'cavalryman'}, {'id': 15356, 'synset': 'caveman.n.01', 'name': 'caveman'}, {'id': 15357, 'synset': 'celebrant.n.02', 'name': 'celebrant'}, {'id': 15358, 'synset': 'celebrant.n.01', 'name': 'celebrant'}, {'id': 15359, 'synset': 'celebrity.n.01', 'name': 'celebrity'}, {'id': 15360, 'synset': 'cellist.n.01', 'name': 'cellist'}, {'id': 15361, 'synset': 'censor.n.02', 'name': 'censor'}, {'id': 15362, 'synset': 'censor.n.01', 'name': 'censor'}, {'id': 15363, 'synset': 'centenarian.n.01', 
'name': 'centenarian'}, {'id': 15364, 'synset': 'centrist.n.01', 'name': 'centrist'}, {'id': 15365, 'synset': 'centurion.n.01', 'name': 'centurion'}, {'id': 15366, 'synset': 'certified_public_accountant.n.01', 'name': 'certified_public_accountant'}, {'id': 15367, 'synset': 'chachka.n.01', 'name': 'chachka'}, {'id': 15368, 'synset': 'chambermaid.n.01', 'name': 'chambermaid'}, {'id': 15369, 'synset': 'chameleon.n.01', 'name': 'chameleon'}, {'id': 15370, 'synset': 'champion.n.01', 'name': 'champion'}, {'id': 15371, 'synset': 'chandler.n.02', 'name': 'chandler'}, {'id': 15372, 'synset': 'prison_chaplain.n.01', 'name': 'prison_chaplain'}, {'id': 15373, 'synset': 'charcoal_burner.n.01', 'name': 'charcoal_burner'}, {'id': 15374, 'synset': "charge_d'affaires.n.01", 'name': "charge_d'affaires"}, {'id': 15375, 'synset': 'charioteer.n.01', 'name': 'charioteer'}, {'id': 15376, 'synset': 'charmer.n.02', 'name': 'charmer'}, {'id': 15377, 'synset': 'chartered_accountant.n.01', 'name': 'chartered_accountant'}, {'id': 15378, 'synset': 'chartist.n.02', 'name': 'chartist'}, {'id': 15379, 'synset': 'charwoman.n.01', 'name': 'charwoman'}, {'id': 15380, 'synset': 'male_chauvinist.n.01', 'name': 'male_chauvinist'}, {'id': 15381, 'synset': 'cheapskate.n.01', 'name': 'cheapskate'}, {'id': 15382, 'synset': 'chechen.n.01', 'name': 'Chechen'}, {'id': 15383, 'synset': 'checker.n.02', 'name': 'checker'}, {'id': 15384, 'synset': 'cheerer.n.01', 'name': 'cheerer'}, {'id': 15385, 'synset': 'cheerleader.n.02', 'name': 'cheerleader'}, {'id': 15386, 'synset': 'cheerleader.n.01', 'name': 'cheerleader'}, {'id': 15387, 'synset': 'cheops.n.01', 'name': 'Cheops'}, {'id': 15388, 'synset': 'chess_master.n.01', 'name': 'chess_master'}, {'id': 15389, 'synset': 'chief_executive_officer.n.01', 'name': 'chief_executive_officer'}, {'id': 15390, 'synset': 'chief_of_staff.n.01', 'name': 'chief_of_staff'}, {'id': 15391, 'synset': 'chief_petty_officer.n.01', 'name': 'chief_petty_officer'}, {'id': 15392, 'synset': 'chief_secretary.n.01', 'name': 'Chief_Secretary'}, {'id': 15393, 'synset': 'child.n.01', 'name': 'child'}, {'id': 15394, 'synset': 'child.n.02', 'name': 'child'}, {'id': 15395, 'synset': 'child.n.03', 'name': 'child'}, {'id': 15396, 'synset': 'child_prodigy.n.01', 'name': 'child_prodigy'}, {'id': 15397, 'synset': 'chimneysweeper.n.01', 'name': 'chimneysweeper'}, {'id': 15398, 'synset': 'chiropractor.n.01', 'name': 'chiropractor'}, {'id': 15399, 'synset': 'chit.n.01', 'name': 'chit'}, {'id': 15400, 'synset': 'choker.n.02', 'name': 'choker'}, {'id': 15401, 'synset': 'choragus.n.01', 'name': 'choragus'}, {'id': 15402, 'synset': 'choreographer.n.01', 'name': 'choreographer'}, {'id': 15403, 'synset': 'chorus_girl.n.01', 'name': 'chorus_girl'}, {'id': 15404, 'synset': 'chosen.n.01', 'name': 'chosen'}, {'id': 15405, 'synset': 'cicerone.n.01', 'name': 'cicerone'}, {'id': 15406, 'synset': 'cigar_smoker.n.01', 'name': 'cigar_smoker'}, {'id': 15407, 'synset': 'cipher.n.04', 'name': 'cipher'}, {'id': 15408, 'synset': 'circus_acrobat.n.01', 'name': 'circus_acrobat'}, {'id': 15409, 'synset': 'citizen.n.01', 'name': 'citizen'}, {'id': 15410, 'synset': 'city_editor.n.01', 'name': 'city_editor'}, {'id': 15411, 'synset': 'city_father.n.01', 'name': 'city_father'}, {'id': 15412, 'synset': 'city_man.n.01', 'name': 'city_man'}, {'id': 15413, 'synset': 'city_slicker.n.01', 'name': 'city_slicker'}, {'id': 15414, 'synset': 'civic_leader.n.01', 'name': 'civic_leader'}, {'id': 15415, 'synset': 'civil_rights_leader.n.01', 'name': 'civil_rights_leader'}, 
{'id': 15416, 'synset': 'cleaner.n.03', 'name': 'cleaner'}, {'id': 15417, 'synset': 'clergyman.n.01', 'name': 'clergyman'}, {'id': 15418, 'synset': 'cleric.n.01', 'name': 'cleric'}, {'id': 15419, 'synset': 'clerk.n.01', 'name': 'clerk'}, {'id': 15420, 'synset': 'clever_dick.n.01', 'name': 'clever_Dick'}, {'id': 15421, 'synset': 'climatologist.n.01', 'name': 'climatologist'}, {'id': 15422, 'synset': 'climber.n.04', 'name': 'climber'}, {'id': 15423, 'synset': 'clinician.n.01', 'name': 'clinician'}, {'id': 15424, 'synset': 'closer.n.02', 'name': 'closer'}, {'id': 15425, 'synset': 'closet_queen.n.01', 'name': 'closet_queen'}, {'id': 15426, 'synset': 'clown.n.02', 'name': 'clown'}, {'id': 15427, 'synset': 'clown.n.01', 'name': 'clown'}, {'id': 15428, 'synset': 'coach.n.02', 'name': 'coach'}, {'id': 15429, 'synset': 'coach.n.01', 'name': 'coach'}, {'id': 15430, 'synset': 'pitching_coach.n.01', 'name': 'pitching_coach'}, {'id': 15431, 'synset': 'coachman.n.01', 'name': 'coachman'}, {'id': 15432, 'synset': 'coal_miner.n.01', 'name': 'coal_miner'}, {'id': 15433, 'synset': 'coastguardsman.n.01', 'name': 'coastguardsman'}, {'id': 15434, 'synset': 'cobber.n.01', 'name': 'cobber'}, {'id': 15435, 'synset': 'cobbler.n.01', 'name': 'cobbler'}, {'id': 15436, 'synset': 'codger.n.01', 'name': 'codger'}, {'id': 15437, 'synset': 'co-beneficiary.n.01', 'name': 'co-beneficiary'}, {'id': 15438, 'synset': 'cog.n.01', 'name': 'cog'}, {'id': 15439, 'synset': 'cognitive_neuroscientist.n.01', 'name': 'cognitive_neuroscientist'}, {'id': 15440, 'synset': 'coiffeur.n.01', 'name': 'coiffeur'}, {'id': 15441, 'synset': 'coiner.n.02', 'name': 'coiner'}, {'id': 15442, 'synset': 'collaborator.n.03', 'name': 'collaborator'}, {'id': 15443, 'synset': 'colleen.n.01', 'name': 'colleen'}, {'id': 15444, 'synset': 'college_student.n.01', 'name': 'college_student'}, {'id': 15445, 'synset': 'collegian.n.01', 'name': 'collegian'}, {'id': 15446, 'synset': 'colonial.n.01', 'name': 'colonial'}, {'id': 15447, 'synset': 'colonialist.n.01', 'name': 'colonialist'}, {'id': 15448, 'synset': 'colonizer.n.01', 'name': 'colonizer'}, {'id': 15449, 'synset': 'coloratura.n.01', 'name': 'coloratura'}, {'id': 15450, 'synset': 'color_guard.n.01', 'name': 'color_guard'}, {'id': 15451, 'synset': 'colossus.n.02', 'name': 'colossus'}, {'id': 15452, 'synset': 'comedian.n.02', 'name': 'comedian'}, {'id': 15453, 'synset': 'comedienne.n.02', 'name': 'comedienne'}, {'id': 15454, 'synset': 'comer.n.01', 'name': 'comer'}, {'id': 15455, 'synset': 'commander.n.03', 'name': 'commander'}, {'id': 15456, 'synset': 'commander_in_chief.n.01', 'name': 'commander_in_chief'}, {'id': 15457, 'synset': 'commanding_officer.n.01', 'name': 'commanding_officer'}, {'id': 15458, 'synset': 'commissar.n.01', 'name': 'commissar'}, {'id': 15459, 'synset': 'commissioned_officer.n.01', 'name': 'commissioned_officer'}, {'id': 15460, 'synset': 'commissioned_military_officer.n.01', 'name': 'commissioned_military_officer'}, {'id': 15461, 'synset': 'commissioner.n.01', 'name': 'commissioner'}, {'id': 15462, 'synset': 'commissioner.n.02', 'name': 'commissioner'}, {'id': 15463, 'synset': 'committee_member.n.01', 'name': 'committee_member'}, {'id': 15464, 'synset': 'committeewoman.n.01', 'name': 'committeewoman'}, {'id': 15465, 'synset': 'commodore.n.01', 'name': 'commodore'}, {'id': 15466, 'synset': 'communicant.n.01', 'name': 'communicant'}, {'id': 15467, 'synset': 'communist.n.02', 'name': 'communist'}, {'id': 15468, 'synset': 'communist.n.01', 'name': 'Communist'}, {'id': 15469, 'synset': 
'commuter.n.02', 'name': 'commuter'}, {'id': 15470, 'synset': 'compere.n.01', 'name': 'compere'}, {'id': 15471, 'synset': 'complexifier.n.01', 'name': 'complexifier'}, {'id': 15472, 'synset': 'compulsive.n.01', 'name': 'compulsive'}, {'id': 15473, 'synset': 'computational_linguist.n.01', 'name': 'computational_linguist'}, {'id': 15474, 'synset': 'computer_scientist.n.01', 'name': 'computer_scientist'}, {'id': 15475, 'synset': 'computer_user.n.01', 'name': 'computer_user'}, {'id': 15476, 'synset': 'comrade.n.02', 'name': 'Comrade'}, {'id': 15477, 'synset': 'concert-goer.n.01', 'name': 'concert-goer'}, {'id': 15478, 'synset': 'conciliator.n.01', 'name': 'conciliator'}, {'id': 15479, 'synset': 'conductor.n.03', 'name': 'conductor'}, {'id': 15480, 'synset': 'confectioner.n.01', 'name': 'confectioner'}, {'id': 15481, 'synset': 'confederate.n.01', 'name': 'Confederate'}, {'id': 15482, 'synset': 'confessor.n.01', 'name': 'confessor'}, {'id': 15483, 'synset': 'confidant.n.01', 'name': 'confidant'}, {'id': 15484, 'synset': 'confucian.n.01', 'name': 'Confucian'}, {'id': 15485, 'synset': 'rep.n.01', 'name': 'rep'}, {'id': 15486, 'synset': 'conqueror.n.01', 'name': 'conqueror'}, {'id': 15487, 'synset': 'conservative.n.02', 'name': 'Conservative'}, {'id': 15488, 'synset': 'nonconformist.n.01', 'name': 'Nonconformist'}, {'id': 15489, 'synset': 'anglican.n.01', 'name': 'Anglican'}, {'id': 15490, 'synset': 'consignee.n.01', 'name': 'consignee'}, {'id': 15491, 'synset': 'consigner.n.01', 'name': 'consigner'}, {'id': 15492, 'synset': 'constable.n.01', 'name': 'constable'}, {'id': 15493, 'synset': 'constructivist.n.01', 'name': 'constructivist'}, {'id': 15494, 'synset': 'contractor.n.01', 'name': 'contractor'}, {'id': 15495, 'synset': 'contralto.n.01', 'name': 'contralto'}, {'id': 15496, 'synset': 'contributor.n.02', 'name': 'contributor'}, {'id': 15497, 'synset': 'control_freak.n.01', 'name': 'control_freak'}, {'id': 15498, 'synset': 'convalescent.n.01', 'name': 'convalescent'}, {'id': 15499, 'synset': 'convener.n.01', 'name': 'convener'}, {'id': 15500, 'synset': 'convict.n.01', 'name': 'convict'}, {'id': 15501, 'synset': 'copilot.n.01', 'name': 'copilot'}, {'id': 15502, 'synset': 'copycat.n.01', 'name': 'copycat'}, {'id': 15503, 'synset': 'coreligionist.n.01', 'name': 'coreligionist'}, {'id': 15504, 'synset': 'cornerback.n.01', 'name': 'cornerback'}, {'id': 15505, 'synset': 'corporatist.n.01', 'name': 'corporatist'}, {'id': 15506, 'synset': 'correspondent.n.01', 'name': 'correspondent'}, {'id': 15507, 'synset': 'cosmetician.n.01', 'name': 'cosmetician'}, {'id': 15508, 'synset': 'cosmopolitan.n.01', 'name': 'cosmopolitan'}, {'id': 15509, 'synset': 'cossack.n.01', 'name': 'Cossack'}, {'id': 15510, 'synset': 'cost_accountant.n.01', 'name': 'cost_accountant'}, {'id': 15511, 'synset': 'co-star.n.01', 'name': 'co-star'}, {'id': 15512, 'synset': 'costumier.n.01', 'name': 'costumier'}, {'id': 15513, 'synset': 'cotter.n.02', 'name': 'cotter'}, {'id': 15514, 'synset': 'cotter.n.01', 'name': 'cotter'}, {'id': 15515, 'synset': 'counselor.n.01', 'name': 'counselor'}, {'id': 15516, 'synset': 'counterterrorist.n.01', 'name': 'counterterrorist'}, {'id': 15517, 'synset': 'counterspy.n.01', 'name': 'counterspy'}, {'id': 15518, 'synset': 'countess.n.01', 'name': 'countess'}, {'id': 15519, 'synset': 'compromiser.n.01', 'name': 'compromiser'}, {'id': 15520, 'synset': 'countrywoman.n.01', 'name': 'countrywoman'}, {'id': 15521, 'synset': 'county_agent.n.01', 'name': 'county_agent'}, {'id': 15522, 'synset': 'courtier.n.01', 
'name': 'courtier'}, {'id': 15523, 'synset': 'cousin.n.01', 'name': 'cousin'}, {'id': 15524, 'synset': 'cover_girl.n.01', 'name': 'cover_girl'}, {'id': 15525, 'synset': 'cow.n.03', 'name': 'cow'}, {'id': 15526, 'synset': 'craftsman.n.03', 'name': 'craftsman'}, {'id': 15527, 'synset': 'craftsman.n.02', 'name': 'craftsman'}, {'id': 15528, 'synset': 'crapshooter.n.01', 'name': 'crapshooter'}, {'id': 15529, 'synset': 'crazy.n.01', 'name': 'crazy'}, {'id': 15530, 'synset': 'creature.n.02', 'name': 'creature'}, {'id': 15531, 'synset': 'creditor.n.01', 'name': 'creditor'}, {'id': 15532, 'synset': 'creep.n.01', 'name': 'creep'}, {'id': 15533, 'synset': 'criminologist.n.01', 'name': 'criminologist'}, {'id': 15534, 'synset': 'critic.n.02', 'name': 'critic'}, {'id': 15535, 'synset': 'croesus.n.02', 'name': 'Croesus'}, {'id': 15536, 'synset': 'cross-examiner.n.01', 'name': 'cross-examiner'}, {'id': 15537, 'synset': 'crossover_voter.n.01', 'name': 'crossover_voter'}, {'id': 15538, 'synset': 'croupier.n.01', 'name': 'croupier'}, {'id': 15539, 'synset': 'crown_prince.n.01', 'name': 'crown_prince'}, {'id': 15540, 'synset': 'crown_princess.n.01', 'name': 'crown_princess'}, {'id': 15541, 'synset': 'cryptanalyst.n.01', 'name': 'cryptanalyst'}, {'id': 15542, 'synset': 'cub_scout.n.01', 'name': 'Cub_Scout'}, {'id': 15543, 'synset': 'cuckold.n.01', 'name': 'cuckold'}, {'id': 15544, 'synset': 'cultist.n.02', 'name': 'cultist'}, {'id': 15545, 'synset': 'curandera.n.01', 'name': 'curandera'}, {'id': 15546, 'synset': 'curate.n.01', 'name': 'curate'}, {'id': 15547, 'synset': 'curator.n.01', 'name': 'curator'}, {'id': 15548, 'synset': 'customer_agent.n.01', 'name': 'customer_agent'}, {'id': 15549, 'synset': 'cutter.n.02', 'name': 'cutter'}, {'id': 15550, 'synset': 'cyberpunk.n.02', 'name': 'cyberpunk'}, {'id': 15551, 'synset': 'cyborg.n.01', 'name': 'cyborg'}, {'id': 15552, 'synset': 'cymbalist.n.01', 'name': 'cymbalist'}, {'id': 15553, 'synset': 'cynic.n.02', 'name': 'Cynic'}, {'id': 15554, 'synset': 'cytogeneticist.n.01', 'name': 'cytogeneticist'}, {'id': 15555, 'synset': 'cytologist.n.01', 'name': 'cytologist'}, {'id': 15556, 'synset': 'czar.n.02', 'name': 'czar'}, {'id': 15557, 'synset': 'czar.n.01', 'name': 'czar'}, {'id': 15558, 'synset': 'dad.n.01', 'name': 'dad'}, {'id': 15559, 'synset': 'dairyman.n.02', 'name': 'dairyman'}, {'id': 15560, 'synset': 'dalai_lama.n.01', 'name': 'Dalai_Lama'}, {'id': 15561, 'synset': 'dallier.n.01', 'name': 'dallier'}, {'id': 15562, 'synset': 'dancer.n.01', 'name': 'dancer'}, {'id': 15563, 'synset': 'dancer.n.02', 'name': 'dancer'}, {'id': 15564, 'synset': 'clog_dancer.n.01', 'name': 'clog_dancer'}, {'id': 15565, 'synset': 'dancing-master.n.01', 'name': 'dancing-master'}, {'id': 15566, 'synset': 'dark_horse.n.01', 'name': 'dark_horse'}, {'id': 15567, 'synset': 'darling.n.01', 'name': 'darling'}, {'id': 15568, 'synset': 'date.n.02', 'name': 'date'}, {'id': 15569, 'synset': 'daughter.n.01', 'name': 'daughter'}, {'id': 15570, 'synset': 'dawdler.n.01', 'name': 'dawdler'}, {'id': 15571, 'synset': 'day_boarder.n.01', 'name': 'day_boarder'}, {'id': 15572, 'synset': 'day_laborer.n.01', 'name': 'day_laborer'}, {'id': 15573, 'synset': 'deacon.n.01', 'name': 'deacon'}, {'id': 15574, 'synset': 'deaconess.n.01', 'name': 'deaconess'}, {'id': 15575, 'synset': 'deadeye.n.01', 'name': 'deadeye'}, {'id': 15576, 'synset': 'deipnosophist.n.01', 'name': 'deipnosophist'}, {'id': 15577, 'synset': 'dropout.n.02', 'name': 'dropout'}, {'id': 15578, 'synset': 'deadhead.n.01', 'name': 'deadhead'}, {'id': 
15579, 'synset': 'deaf_person.n.01', 'name': 'deaf_person'}, {'id': 15580, 'synset': 'debtor.n.01', 'name': 'debtor'}, {'id': 15581, 'synset': 'deckhand.n.01', 'name': 'deckhand'}, {'id': 15582, 'synset': 'defamer.n.01', 'name': 'defamer'}, {'id': 15583, 'synset': 'defense_contractor.n.01', 'name': 'defense_contractor'}, {'id': 15584, 'synset': 'deist.n.01', 'name': 'deist'}, {'id': 15585, 'synset': 'delegate.n.01', 'name': 'delegate'}, {'id': 15586, 'synset': 'deliveryman.n.01', 'name': 'deliveryman'}, {'id': 15587, 'synset': 'demagogue.n.01', 'name': 'demagogue'}, {'id': 15588, 'synset': 'demigod.n.01', 'name': 'demigod'}, {'id': 15589, 'synset': 'demographer.n.01', 'name': 'demographer'}, {'id': 15590, 'synset': 'demonstrator.n.03', 'name': 'demonstrator'}, {'id': 15591, 'synset': 'den_mother.n.02', 'name': 'den_mother'}, {'id': 15592, 'synset': 'department_head.n.01', 'name': 'department_head'}, {'id': 15593, 'synset': 'depositor.n.01', 'name': 'depositor'}, {'id': 15594, 'synset': 'deputy.n.03', 'name': 'deputy'}, {'id': 15595, 'synset': 'dermatologist.n.01', 'name': 'dermatologist'}, {'id': 15596, 'synset': 'descender.n.01', 'name': 'descender'}, {'id': 15597, 'synset': 'designated_hitter.n.01', 'name': 'designated_hitter'}, {'id': 15598, 'synset': 'designer.n.04', 'name': 'designer'}, {'id': 15599, 'synset': 'desk_clerk.n.01', 'name': 'desk_clerk'}, {'id': 15600, 'synset': 'desk_officer.n.01', 'name': 'desk_officer'}, {'id': 15601, 'synset': 'desk_sergeant.n.01', 'name': 'desk_sergeant'}, {'id': 15602, 'synset': 'detainee.n.01', 'name': 'detainee'}, {'id': 15603, 'synset': 'detective.n.01', 'name': 'detective'}, {'id': 15604, 'synset': 'detective.n.02', 'name': 'detective'}, {'id': 15605, 'synset': 'detractor.n.01', 'name': 'detractor'}, {'id': 15606, 'synset': 'developer.n.01', 'name': 'developer'}, {'id': 15607, 'synset': 'deviationist.n.01', 'name': 'deviationist'}, {'id': 15608, 'synset': 'devisee.n.01', 'name': 'devisee'}, {'id': 15609, 'synset': 'devisor.n.01', 'name': 'devisor'}, {'id': 15610, 'synset': 'devourer.n.01', 'name': 'devourer'}, {'id': 15611, 'synset': 'dialectician.n.01', 'name': 'dialectician'}, {'id': 15612, 'synset': 'diarist.n.01', 'name': 'diarist'}, {'id': 15613, 'synset': 'dietician.n.01', 'name': 'dietician'}, {'id': 15614, 'synset': 'diocesan.n.01', 'name': 'diocesan'}, {'id': 15615, 'synset': 'director.n.03', 'name': 'director'}, {'id': 15616, 'synset': 'director.n.02', 'name': 'director'}, {'id': 15617, 'synset': 'dirty_old_man.n.01', 'name': 'dirty_old_man'}, {'id': 15618, 'synset': 'disbeliever.n.01', 'name': 'disbeliever'}, {'id': 15619, 'synset': 'disk_jockey.n.01', 'name': 'disk_jockey'}, {'id': 15620, 'synset': 'dispatcher.n.02', 'name': 'dispatcher'}, {'id': 15621, 'synset': 'distortionist.n.01', 'name': 'distortionist'}, {'id': 15622, 'synset': 'distributor.n.01', 'name': 'distributor'}, {'id': 15623, 'synset': 'district_attorney.n.01', 'name': 'district_attorney'}, {'id': 15624, 'synset': 'district_manager.n.01', 'name': 'district_manager'}, {'id': 15625, 'synset': 'diver.n.02', 'name': 'diver'}, {'id': 15626, 'synset': 'divorcee.n.01', 'name': 'divorcee'}, {'id': 15627, 'synset': 'ex-wife.n.01', 'name': 'ex-wife'}, {'id': 15628, 'synset': 'divorce_lawyer.n.01', 'name': 'divorce_lawyer'}, {'id': 15629, 'synset': 'docent.n.01', 'name': 'docent'}, {'id': 15630, 'synset': 'doctor.n.01', 'name': 'doctor'}, {'id': 15631, 'synset': 'dodo.n.01', 'name': 'dodo'}, {'id': 15632, 'synset': 'doge.n.01', 'name': 'doge'}, {'id': 15633, 'synset': 
'dog_in_the_manger.n.01', 'name': 'dog_in_the_manger'}, {'id': 15634, 'synset': 'dogmatist.n.01', 'name': 'dogmatist'}, {'id': 15635, 'synset': 'dolichocephalic.n.01', 'name': 'dolichocephalic'}, {'id': 15636, 'synset': 'domestic_partner.n.01', 'name': 'domestic_partner'}, {'id': 15637, 'synset': 'dominican.n.02', 'name': 'Dominican'}, {'id': 15638, 'synset': 'dominus.n.01', 'name': 'dominus'}, {'id': 15639, 'synset': 'don.n.03', 'name': 'don'}, {'id': 15640, 'synset': 'donatist.n.01', 'name': 'Donatist'}, {'id': 15641, 'synset': 'donna.n.01', 'name': 'donna'}, {'id': 15642, 'synset': 'dosser.n.01', 'name': 'dosser'}, {'id': 15643, 'synset': 'double.n.03', 'name': 'double'}, {'id': 15644, 'synset': 'double-crosser.n.01', 'name': 'double-crosser'}, {'id': 15645, 'synset': 'down-and-out.n.01', 'name': 'down-and-out'}, {'id': 15646, 'synset': 'doyenne.n.01', 'name': 'doyenne'}, {'id': 15647, 'synset': 'draftsman.n.02', 'name': 'draftsman'}, {'id': 15648, 'synset': 'dramatist.n.01', 'name': 'dramatist'}, {'id': 15649, 'synset': 'dreamer.n.01', 'name': 'dreamer'}, {'id': 15650, 'synset': 'dressmaker.n.01', 'name': 'dressmaker'}, {'id': 15651, 'synset': "dressmaker's_model.n.01", 'name': "dressmaker's_model"}, {'id': 15652, 'synset': 'dribbler.n.02', 'name': 'dribbler'}, {'id': 15653, 'synset': 'dribbler.n.01', 'name': 'dribbler'}, {'id': 15654, 'synset': 'drinker.n.02', 'name': 'drinker'}, {'id': 15655, 'synset': 'drinker.n.01', 'name': 'drinker'}, {'id': 15656, 'synset': 'drug_addict.n.01', 'name': 'drug_addict'}, {'id': 15657, 'synset': 'drug_user.n.01', 'name': 'drug_user'}, {'id': 15658, 'synset': 'druid.n.01', 'name': 'Druid'}, {'id': 15659, 'synset': 'drum_majorette.n.02', 'name': 'drum_majorette'}, {'id': 15660, 'synset': 'drummer.n.01', 'name': 'drummer'}, {'id': 15661, 'synset': 'drunk.n.02', 'name': 'drunk'}, {'id': 15662, 'synset': 'drunkard.n.01', 'name': 'drunkard'}, {'id': 15663, 'synset': 'druze.n.01', 'name': 'Druze'}, {'id': 15664, 'synset': 'dry.n.01', 'name': 'dry'}, {'id': 15665, 'synset': 'dry_nurse.n.01', 'name': 'dry_nurse'}, {'id': 15666, 'synset': 'duchess.n.01', 'name': 'duchess'}, {'id': 15667, 'synset': 'duke.n.01', 'name': 'duke'}, {'id': 15668, 'synset': 'duffer.n.01', 'name': 'duffer'}, {'id': 15669, 'synset': 'dunker.n.02', 'name': 'dunker'}, {'id': 15670, 'synset': 'dutch_uncle.n.01', 'name': 'Dutch_uncle'}, {'id': 15671, 'synset': 'dyspeptic.n.01', 'name': 'dyspeptic'}, {'id': 15672, 'synset': 'eager_beaver.n.01', 'name': 'eager_beaver'}, {'id': 15673, 'synset': 'earl.n.01', 'name': 'earl'}, {'id': 15674, 'synset': 'earner.n.01', 'name': 'earner'}, {'id': 15675, 'synset': 'eavesdropper.n.01', 'name': 'eavesdropper'}, {'id': 15676, 'synset': 'eccentric.n.01', 'name': 'eccentric'}, {'id': 15677, 'synset': 'eclectic.n.01', 'name': 'eclectic'}, {'id': 15678, 'synset': 'econometrician.n.01', 'name': 'econometrician'}, {'id': 15679, 'synset': 'economist.n.01', 'name': 'economist'}, {'id': 15680, 'synset': 'ectomorph.n.01', 'name': 'ectomorph'}, {'id': 15681, 'synset': 'editor.n.01', 'name': 'editor'}, {'id': 15682, 'synset': 'egocentric.n.01', 'name': 'egocentric'}, {'id': 15683, 'synset': 'egotist.n.01', 'name': 'egotist'}, {'id': 15684, 'synset': 'ejaculator.n.01', 'name': 'ejaculator'}, {'id': 15685, 'synset': 'elder.n.03', 'name': 'elder'}, {'id': 15686, 'synset': 'elder_statesman.n.01', 'name': 'elder_statesman'}, {'id': 15687, 'synset': 'elected_official.n.01', 'name': 'elected_official'}, {'id': 15688, 'synset': 'electrician.n.01', 'name': 'electrician'}, 
{'id': 15689, 'synset': 'elegist.n.01', 'name': 'elegist'}, {'id': 15690, 'synset': 'elocutionist.n.01', 'name': 'elocutionist'}, {'id': 15691, 'synset': 'emancipator.n.01', 'name': 'emancipator'}, {'id': 15692, 'synset': 'embryologist.n.01', 'name': 'embryologist'}, {'id': 15693, 'synset': 'emeritus.n.01', 'name': 'emeritus'}, {'id': 15694, 'synset': 'emigrant.n.01', 'name': 'emigrant'}, {'id': 15695, 'synset': 'emissary.n.01', 'name': 'emissary'}, {'id': 15696, 'synset': 'empress.n.01', 'name': 'empress'}, {'id': 15697, 'synset': 'employee.n.01', 'name': 'employee'}, {'id': 15698, 'synset': 'employer.n.01', 'name': 'employer'}, {'id': 15699, 'synset': 'enchantress.n.02', 'name': 'enchantress'}, {'id': 15700, 'synset': 'enchantress.n.01', 'name': 'enchantress'}, {'id': 15701, 'synset': 'encyclopedist.n.01', 'name': 'encyclopedist'}, {'id': 15702, 'synset': 'endomorph.n.01', 'name': 'endomorph'}, {'id': 15703, 'synset': 'enemy.n.02', 'name': 'enemy'}, {'id': 15704, 'synset': 'energizer.n.01', 'name': 'energizer'}, {'id': 15705, 'synset': 'end_man.n.02', 'name': 'end_man'}, {'id': 15706, 'synset': 'end_man.n.01', 'name': 'end_man'}, {'id': 15707, 'synset': 'endorser.n.02', 'name': 'endorser'}, {'id': 15708, 'synset': 'enjoyer.n.01', 'name': 'enjoyer'}, {'id': 15709, 'synset': 'enlisted_woman.n.01', 'name': 'enlisted_woman'}, {'id': 15710, 'synset': 'enophile.n.01', 'name': 'enophile'}, {'id': 15711, 'synset': 'entrant.n.04', 'name': 'entrant'}, {'id': 15712, 'synset': 'entrant.n.03', 'name': 'entrant'}, {'id': 15713, 'synset': 'entrepreneur.n.01', 'name': 'entrepreneur'}, {'id': 15714, 'synset': 'envoy.n.01', 'name': 'envoy'}, {'id': 15715, 'synset': 'enzymologist.n.01', 'name': 'enzymologist'}, {'id': 15716, 'synset': 'eparch.n.01', 'name': 'eparch'}, {'id': 15717, 'synset': 'epidemiologist.n.01', 'name': 'epidemiologist'}, {'id': 15718, 'synset': 'epigone.n.01', 'name': 'epigone'}, {'id': 15719, 'synset': 'epileptic.n.01', 'name': 'epileptic'}, {'id': 15720, 'synset': 'episcopalian.n.01', 'name': 'Episcopalian'}, {'id': 15721, 'synset': 'equerry.n.02', 'name': 'equerry'}, {'id': 15722, 'synset': 'equerry.n.01', 'name': 'equerry'}, {'id': 15723, 'synset': 'erotic.n.01', 'name': 'erotic'}, {'id': 15724, 'synset': 'escapee.n.01', 'name': 'escapee'}, {'id': 15725, 'synset': 'escapist.n.01', 'name': 'escapist'}, {'id': 15726, 'synset': 'eskimo.n.01', 'name': 'Eskimo'}, {'id': 15727, 'synset': 'espionage_agent.n.01', 'name': 'espionage_agent'}, {'id': 15728, 'synset': 'esthetician.n.01', 'name': 'esthetician'}, {'id': 15729, 'synset': 'etcher.n.01', 'name': 'etcher'}, {'id': 15730, 'synset': 'ethnologist.n.01', 'name': 'ethnologist'}, {'id': 15731, 'synset': 'etonian.n.01', 'name': 'Etonian'}, {'id': 15732, 'synset': 'etymologist.n.01', 'name': 'etymologist'}, {'id': 15733, 'synset': 'evangelist.n.01', 'name': 'evangelist'}, {'id': 15734, 'synset': 'evangelist.n.02', 'name': 'Evangelist'}, {'id': 15735, 'synset': 'event_planner.n.01', 'name': 'event_planner'}, {'id': 15736, 'synset': 'examiner.n.02', 'name': 'examiner'}, {'id': 15737, 'synset': 'examiner.n.01', 'name': 'examiner'}, {'id': 15738, 'synset': 'exarch.n.03', 'name': 'exarch'}, {'id': 15739, 'synset': 'executant.n.01', 'name': 'executant'}, {'id': 15740, 'synset': 'executive_secretary.n.01', 'name': 'executive_secretary'}, {'id': 15741, 'synset': 'executive_vice_president.n.01', 'name': 'executive_vice_president'}, {'id': 15742, 'synset': 'executrix.n.01', 'name': 'executrix'}, {'id': 15743, 'synset': 'exegete.n.01', 'name': 
'exegete'}, {'id': 15744, 'synset': 'exhibitor.n.01', 'name': 'exhibitor'}, {'id': 15745, 'synset': 'exhibitionist.n.02', 'name': 'exhibitionist'}, {'id': 15746, 'synset': 'exile.n.01', 'name': 'exile'}, {'id': 15747, 'synset': 'existentialist.n.01', 'name': 'existentialist'}, {'id': 15748, 'synset': 'exorcist.n.02', 'name': 'exorcist'}, {'id': 15749, 'synset': 'ex-spouse.n.01', 'name': 'ex-spouse'}, {'id': 15750, 'synset': 'extern.n.01', 'name': 'extern'}, {'id': 15751, 'synset': 'extremist.n.01', 'name': 'extremist'}, {'id': 15752, 'synset': 'extrovert.n.01', 'name': 'extrovert'}, {'id': 15753, 'synset': 'eyewitness.n.01', 'name': 'eyewitness'}, {'id': 15754, 'synset': 'facilitator.n.01', 'name': 'facilitator'}, {'id': 15755, 'synset': 'fairy_godmother.n.01', 'name': 'fairy_godmother'}, {'id': 15756, 'synset': 'falangist.n.01', 'name': 'falangist'}, {'id': 15757, 'synset': 'falconer.n.01', 'name': 'falconer'}, {'id': 15758, 'synset': 'falsifier.n.01', 'name': 'falsifier'}, {'id': 15759, 'synset': 'familiar.n.01', 'name': 'familiar'}, {'id': 15760, 'synset': 'fan.n.03', 'name': 'fan'}, {'id': 15761, 'synset': 'fanatic.n.01', 'name': 'fanatic'}, {'id': 15762, 'synset': 'fancier.n.01', 'name': 'fancier'}, {'id': 15763, 'synset': 'farm_boy.n.01', 'name': 'farm_boy'}, {'id': 15764, 'synset': 'farmer.n.01', 'name': 'farmer'}, {'id': 15765, 'synset': 'farmhand.n.01', 'name': 'farmhand'}, {'id': 15766, 'synset': 'fascist.n.01', 'name': 'fascist'}, {'id': 15767, 'synset': 'fascista.n.01', 'name': 'fascista'}, {'id': 15768, 'synset': 'fatalist.n.01', 'name': 'fatalist'}, {'id': 15769, 'synset': 'father.n.01', 'name': 'father'}, {'id': 15770, 'synset': 'father.n.03', 'name': 'Father'}, {'id': 15771, 'synset': 'father-figure.n.01', 'name': 'father-figure'}, {'id': 15772, 'synset': 'father-in-law.n.01', 'name': 'father-in-law'}, {'id': 15773, 'synset': 'fauntleroy.n.01', 'name': 'Fauntleroy'}, {'id': 15774, 'synset': 'fauve.n.01', 'name': 'Fauve'}, {'id': 15775, 'synset': 'favorite_son.n.01', 'name': 'favorite_son'}, {'id': 15776, 'synset': 'featherweight.n.03', 'name': 'featherweight'}, {'id': 15777, 'synset': 'federalist.n.02', 'name': 'federalist'}, {'id': 15778, 'synset': 'fellow_traveler.n.01', 'name': 'fellow_traveler'}, {'id': 15779, 'synset': 'female_aristocrat.n.01', 'name': 'female_aristocrat'}, {'id': 15780, 'synset': 'female_offspring.n.01', 'name': 'female_offspring'}, {'id': 15781, 'synset': 'female_child.n.01', 'name': 'female_child'}, {'id': 15782, 'synset': 'fence.n.02', 'name': 'fence'}, {'id': 15783, 'synset': 'fiance.n.01', 'name': 'fiance'}, {'id': 15784, 'synset': 'fielder.n.02', 'name': 'fielder'}, {'id': 15785, 'synset': 'field_judge.n.01', 'name': 'field_judge'}, {'id': 15786, 'synset': 'fighter_pilot.n.01', 'name': 'fighter_pilot'}, {'id': 15787, 'synset': 'filer.n.01', 'name': 'filer'}, {'id': 15788, 'synset': 'film_director.n.01', 'name': 'film_director'}, {'id': 15789, 'synset': 'finder.n.01', 'name': 'finder'}, {'id': 15790, 'synset': 'fire_chief.n.01', 'name': 'fire_chief'}, {'id': 15791, 'synset': 'fire-eater.n.03', 'name': 'fire-eater'}, {'id': 15792, 'synset': 'fire-eater.n.02', 'name': 'fire-eater'}, {'id': 15793, 'synset': 'fireman.n.04', 'name': 'fireman'}, {'id': 15794, 'synset': 'fire_marshall.n.01', 'name': 'fire_marshall'}, {'id': 15795, 'synset': 'fire_walker.n.01', 'name': 'fire_walker'}, {'id': 15796, 'synset': 'first_baseman.n.01', 'name': 'first_baseman'}, {'id': 15797, 'synset': 'firstborn.n.01', 'name': 'firstborn'}, {'id': 15798, 'synset': 
'first_lady.n.02', 'name': 'first_lady'}, {'id': 15799, 'synset': 'first_lieutenant.n.01', 'name': 'first_lieutenant'}, {'id': 15800, 'synset': 'first_offender.n.01', 'name': 'first_offender'}, {'id': 15801, 'synset': 'first_sergeant.n.01', 'name': 'first_sergeant'}, {'id': 15802, 'synset': 'fishmonger.n.01', 'name': 'fishmonger'}, {'id': 15803, 'synset': 'flagellant.n.02', 'name': 'flagellant'}, {'id': 15804, 'synset': 'flag_officer.n.01', 'name': 'flag_officer'}, {'id': 15805, 'synset': 'flak_catcher.n.01', 'name': 'flak_catcher'}, {'id': 15806, 'synset': 'flanker_back.n.01', 'name': 'flanker_back'}, {'id': 15807, 'synset': 'flapper.n.01', 'name': 'flapper'}, {'id': 15808, 'synset': 'flatmate.n.01', 'name': 'flatmate'}, {'id': 15809, 'synset': 'flatterer.n.01', 'name': 'flatterer'}, {'id': 15810, 'synset': 'flibbertigibbet.n.01', 'name': 'flibbertigibbet'}, {'id': 15811, 'synset': 'flight_surgeon.n.01', 'name': 'flight_surgeon'}, {'id': 15812, 'synset': 'floorwalker.n.01', 'name': 'floorwalker'}, {'id': 15813, 'synset': 'flop.n.02', 'name': 'flop'}, {'id': 15814, 'synset': 'florentine.n.01', 'name': 'Florentine'}, {'id': 15815, 'synset': 'flower_girl.n.02', 'name': 'flower_girl'}, {'id': 15816, 'synset': 'flower_girl.n.01', 'name': 'flower_girl'}, {'id': 15817, 'synset': 'flutist.n.01', 'name': 'flutist'}, {'id': 15818, 'synset': 'fly-by-night.n.01', 'name': 'fly-by-night'}, {'id': 15819, 'synset': 'flyweight.n.02', 'name': 'flyweight'}, {'id': 15820, 'synset': 'flyweight.n.01', 'name': 'flyweight'}, {'id': 15821, 'synset': 'foe.n.02', 'name': 'foe'}, {'id': 15822, 'synset': 'folk_dancer.n.01', 'name': 'folk_dancer'}, {'id': 15823, 'synset': 'folk_poet.n.01', 'name': 'folk_poet'}, {'id': 15824, 'synset': 'follower.n.01', 'name': 'follower'}, {'id': 15825, 'synset': 'football_hero.n.01', 'name': 'football_hero'}, {'id': 15826, 'synset': 'football_player.n.01', 'name': 'football_player'}, {'id': 15827, 'synset': 'footman.n.01', 'name': 'footman'}, {'id': 15828, 'synset': 'forefather.n.01', 'name': 'forefather'}, {'id': 15829, 'synset': 'foremother.n.01', 'name': 'foremother'}, {'id': 15830, 'synset': 'foreign_agent.n.01', 'name': 'foreign_agent'}, {'id': 15831, 'synset': 'foreigner.n.02', 'name': 'foreigner'}, {'id': 15832, 'synset': 'boss.n.03', 'name': 'boss'}, {'id': 15833, 'synset': 'foreman.n.02', 'name': 'foreman'}, {'id': 15834, 'synset': 'forester.n.02', 'name': 'forester'}, {'id': 15835, 'synset': 'forewoman.n.02', 'name': 'forewoman'}, {'id': 15836, 'synset': 'forger.n.02', 'name': 'forger'}, {'id': 15837, 'synset': 'forward.n.01', 'name': 'forward'}, {'id': 15838, 'synset': 'foster-brother.n.01', 'name': 'foster-brother'}, {'id': 15839, 'synset': 'foster-father.n.01', 'name': 'foster-father'}, {'id': 15840, 'synset': 'foster-mother.n.01', 'name': 'foster-mother'}, {'id': 15841, 'synset': 'foster-sister.n.01', 'name': 'foster-sister'}, {'id': 15842, 'synset': 'foster-son.n.01', 'name': 'foster-son'}, {'id': 15843, 'synset': 'founder.n.02', 'name': 'founder'}, {'id': 15844, 'synset': 'foundress.n.01', 'name': 'foundress'}, {'id': 15845, 'synset': 'four-minute_man.n.01', 'name': 'four-minute_man'}, {'id': 15846, 'synset': 'framer.n.02', 'name': 'framer'}, {'id': 15847, 'synset': 'francophobe.n.01', 'name': 'Francophobe'}, {'id': 15848, 'synset': 'freak.n.01', 'name': 'freak'}, {'id': 15849, 'synset': 'free_agent.n.02', 'name': 'free_agent'}, {'id': 15850, 'synset': 'free_agent.n.01', 'name': 'free_agent'}, {'id': 15851, 'synset': 'freedom_rider.n.01', 'name': 'freedom_rider'}, 
{'id': 15852, 'synset': 'free-liver.n.01', 'name': 'free-liver'}, {'id': 15853, 'synset': 'freeloader.n.01', 'name': 'freeloader'}, {'id': 15854, 'synset': 'free_trader.n.01', 'name': 'free_trader'}, {'id': 15855, 'synset': 'freudian.n.01', 'name': 'Freudian'}, {'id': 15856, 'synset': 'friar.n.01', 'name': 'friar'}, {'id': 15857, 'synset': 'monk.n.01', 'name': 'monk'}, {'id': 15858, 'synset': 'frontierswoman.n.01', 'name': 'frontierswoman'}, {'id': 15859, 'synset': 'front_man.n.01', 'name': 'front_man'}, {'id': 15860, 'synset': 'frotteur.n.01', 'name': 'frotteur'}, {'id': 15861, 'synset': 'fucker.n.02', 'name': 'fucker'}, {'id': 15862, 'synset': 'fucker.n.01', 'name': 'fucker'}, {'id': 15863, 'synset': 'fuddy-duddy.n.01', 'name': 'fuddy-duddy'}, {'id': 15864, 'synset': 'fullback.n.01', 'name': 'fullback'}, {'id': 15865, 'synset': 'funambulist.n.01', 'name': 'funambulist'}, {'id': 15866, 'synset': 'fundamentalist.n.01', 'name': 'fundamentalist'}, {'id': 15867, 'synset': 'fundraiser.n.01', 'name': 'fundraiser'}, {'id': 15868, 'synset': 'futurist.n.01', 'name': 'futurist'}, {'id': 15869, 'synset': 'gadgeteer.n.01', 'name': 'gadgeteer'}, {'id': 15870, 'synset': 'gagman.n.02', 'name': 'gagman'}, {'id': 15871, 'synset': 'gagman.n.01', 'name': 'gagman'}, {'id': 15872, 'synset': 'gainer.n.01', 'name': 'gainer'}, {'id': 15873, 'synset': 'gal.n.03', 'name': 'gal'}, {'id': 15874, 'synset': 'galoot.n.01', 'name': 'galoot'}, {'id': 15875, 'synset': 'gambist.n.01', 'name': 'gambist'}, {'id': 15876, 'synset': 'gambler.n.01', 'name': 'gambler'}, {'id': 15877, 'synset': 'gamine.n.02', 'name': 'gamine'}, {'id': 15878, 'synset': 'garbage_man.n.01', 'name': 'garbage_man'}, {'id': 15879, 'synset': 'gardener.n.02', 'name': 'gardener'}, {'id': 15880, 'synset': 'garment_cutter.n.01', 'name': 'garment_cutter'}, {'id': 15881, 'synset': 'garroter.n.01', 'name': 'garroter'}, {'id': 15882, 'synset': 'gasman.n.01', 'name': 'gasman'}, {'id': 15883, 'synset': 'gastroenterologist.n.01', 'name': 'gastroenterologist'}, {'id': 15884, 'synset': 'gatherer.n.01', 'name': 'gatherer'}, {'id': 15885, 'synset': 'gawker.n.01', 'name': 'gawker'}, {'id': 15886, 'synset': 'gendarme.n.01', 'name': 'gendarme'}, {'id': 15887, 'synset': 'general.n.01', 'name': 'general'}, {'id': 15888, 'synset': 'generator.n.03', 'name': 'generator'}, {'id': 15889, 'synset': 'geneticist.n.01', 'name': 'geneticist'}, {'id': 15890, 'synset': 'genitor.n.01', 'name': 'genitor'}, {'id': 15891, 'synset': 'gent.n.01', 'name': 'gent'}, {'id': 15892, 'synset': 'geologist.n.01', 'name': 'geologist'}, {'id': 15893, 'synset': 'geophysicist.n.01', 'name': 'geophysicist'}, {'id': 15894, 'synset': 'ghostwriter.n.01', 'name': 'ghostwriter'}, {'id': 15895, 'synset': 'gibson_girl.n.01', 'name': 'Gibson_girl'}, {'id': 15896, 'synset': 'girl.n.01', 'name': 'girl'}, {'id': 15897, 'synset': 'girlfriend.n.02', 'name': 'girlfriend'}, {'id': 15898, 'synset': 'girlfriend.n.01', 'name': 'girlfriend'}, {'id': 15899, 'synset': 'girl_wonder.n.01', 'name': 'girl_wonder'}, {'id': 15900, 'synset': 'girondist.n.01', 'name': 'Girondist'}, {'id': 15901, 'synset': 'gitano.n.01', 'name': 'gitano'}, {'id': 15902, 'synset': 'gladiator.n.01', 'name': 'gladiator'}, {'id': 15903, 'synset': 'glassblower.n.01', 'name': 'glassblower'}, {'id': 15904, 'synset': 'gleaner.n.02', 'name': 'gleaner'}, {'id': 15905, 'synset': 'goat_herder.n.01', 'name': 'goat_herder'}, {'id': 15906, 'synset': 'godchild.n.01', 'name': 'godchild'}, {'id': 15907, 'synset': 'godfather.n.01', 'name': 'godfather'}, {'id': 15908, 
'synset': 'godparent.n.01', 'name': 'godparent'}, {'id': 15909, 'synset': 'godson.n.01', 'name': 'godson'}, {'id': 15910, 'synset': 'gofer.n.01', 'name': 'gofer'}, {'id': 15911, 'synset': 'goffer.n.01', 'name': 'goffer'}, {'id': 15912, 'synset': 'goldsmith.n.01', 'name': 'goldsmith'}, {'id': 15913, 'synset': 'golfer.n.01', 'name': 'golfer'}, {'id': 15914, 'synset': 'gondolier.n.01', 'name': 'gondolier'}, {'id': 15915, 'synset': 'good_guy.n.01', 'name': 'good_guy'}, {'id': 15916, 'synset': 'good_old_boy.n.01', 'name': 'good_old_boy'}, {'id': 15917, 'synset': 'good_samaritan.n.01', 'name': 'good_Samaritan'}, {'id': 15918, 'synset': 'gossip_columnist.n.01', 'name': 'gossip_columnist'}, {'id': 15919, 'synset': 'gouger.n.01', 'name': 'gouger'}, {'id': 15920, 'synset': 'governor_general.n.01', 'name': 'governor_general'}, {'id': 15921, 'synset': 'grabber.n.01', 'name': 'grabber'}, {'id': 15922, 'synset': 'grader.n.01', 'name': 'grader'}, {'id': 15923, 'synset': 'graduate_nurse.n.01', 'name': 'graduate_nurse'}, {'id': 15924, 'synset': 'grammarian.n.01', 'name': 'grammarian'}, {'id': 15925, 'synset': 'granddaughter.n.01', 'name': 'granddaughter'}, {'id': 15926, 'synset': 'grande_dame.n.01', 'name': 'grande_dame'}, {'id': 15927, 'synset': 'grandfather.n.01', 'name': 'grandfather'}, {'id': 15928, 'synset': 'grand_inquisitor.n.01', 'name': 'Grand_Inquisitor'}, {'id': 15929, 'synset': 'grandma.n.01', 'name': 'grandma'}, {'id': 15930, 'synset': 'grandmaster.n.01', 'name': 'grandmaster'}, {'id': 15931, 'synset': 'grandparent.n.01', 'name': 'grandparent'}, {'id': 15932, 'synset': 'grantee.n.01', 'name': 'grantee'}, {'id': 15933, 'synset': 'granter.n.01', 'name': 'granter'}, {'id': 15934, 'synset': 'grass_widower.n.01', 'name': 'grass_widower'}, {'id': 15935, 'synset': 'great-aunt.n.01', 'name': 'great-aunt'}, {'id': 15936, 'synset': 'great_grandchild.n.01', 'name': 'great_grandchild'}, {'id': 15937, 'synset': 'great_granddaughter.n.01', 'name': 'great_granddaughter'}, {'id': 15938, 'synset': 'great_grandmother.n.01', 'name': 'great_grandmother'}, {'id': 15939, 'synset': 'great_grandparent.n.01', 'name': 'great_grandparent'}, {'id': 15940, 'synset': 'great_grandson.n.01', 'name': 'great_grandson'}, {'id': 15941, 'synset': 'great-nephew.n.01', 'name': 'great-nephew'}, {'id': 15942, 'synset': 'great-niece.n.01', 'name': 'great-niece'}, {'id': 15943, 'synset': 'green_beret.n.01', 'name': 'Green_Beret'}, {'id': 15944, 'synset': 'grenadier.n.01', 'name': 'grenadier'}, {'id': 15945, 'synset': 'greeter.n.01', 'name': 'greeter'}, {'id': 15946, 'synset': 'gringo.n.01', 'name': 'gringo'}, {'id': 15947, 'synset': 'grinner.n.01', 'name': 'grinner'}, {'id': 15948, 'synset': 'grocer.n.01', 'name': 'grocer'}, {'id': 15949, 'synset': 'groom.n.03', 'name': 'groom'}, {'id': 15950, 'synset': 'groom.n.01', 'name': 'groom'}, {'id': 15951, 'synset': 'grouch.n.01', 'name': 'grouch'}, {'id': 15952, 'synset': 'group_captain.n.01', 'name': 'group_captain'}, {'id': 15953, 'synset': 'grunter.n.01', 'name': 'grunter'}, {'id': 15954, 'synset': 'prison_guard.n.01', 'name': 'prison_guard'}, {'id': 15955, 'synset': 'guard.n.01', 'name': 'guard'}, {'id': 15956, 'synset': 'guesser.n.01', 'name': 'guesser'}, {'id': 15957, 'synset': 'guest.n.01', 'name': 'guest'}, {'id': 15958, 'synset': 'guest.n.03', 'name': 'guest'}, {'id': 15959, 'synset': 'guest_of_honor.n.01', 'name': 'guest_of_honor'}, {'id': 15960, 'synset': 'guest_worker.n.01', 'name': 'guest_worker'}, {'id': 15961, 'synset': 'guide.n.02', 'name': 'guide'}, {'id': 15962, 'synset': 
'guitarist.n.01', 'name': 'guitarist'}, {'id': 15963, 'synset': 'gunnery_sergeant.n.01', 'name': 'gunnery_sergeant'}, {'id': 15964, 'synset': 'guru.n.01', 'name': 'guru'}, {'id': 15965, 'synset': 'guru.n.03', 'name': 'guru'}, {'id': 15966, 'synset': 'guvnor.n.01', 'name': 'guvnor'}, {'id': 15967, 'synset': 'guy.n.01', 'name': 'guy'}, {'id': 15968, 'synset': 'gymnast.n.01', 'name': 'gymnast'}, {'id': 15969, 'synset': 'gym_rat.n.01', 'name': 'gym_rat'}, {'id': 15970, 'synset': 'gynecologist.n.01', 'name': 'gynecologist'}, {'id': 15971, 'synset': 'gypsy.n.02', 'name': 'Gypsy'}, {'id': 15972, 'synset': 'hack.n.01', 'name': 'hack'}, {'id': 15973, 'synset': 'hacker.n.02', 'name': 'hacker'}, {'id': 15974, 'synset': 'haggler.n.01', 'name': 'haggler'}, {'id': 15975, 'synset': 'hairdresser.n.01', 'name': 'hairdresser'}, {'id': 15976, 'synset': 'hakim.n.02', 'name': 'hakim'}, {'id': 15977, 'synset': 'hakka.n.01', 'name': 'Hakka'}, {'id': 15978, 'synset': 'halberdier.n.01', 'name': 'halberdier'}, {'id': 15979, 'synset': 'halfback.n.01', 'name': 'halfback'}, {'id': 15980, 'synset': 'half_blood.n.01', 'name': 'half_blood'}, {'id': 15981, 'synset': 'hand.n.10', 'name': 'hand'}, {'id': 15982, 'synset': 'animal_trainer.n.01', 'name': 'animal_trainer'}, {'id': 15983, 'synset': 'handyman.n.01', 'name': 'handyman'}, {'id': 15984, 'synset': 'hang_glider.n.01', 'name': 'hang_glider'}, {'id': 15985, 'synset': 'hardliner.n.01', 'name': 'hardliner'}, {'id': 15986, 'synset': 'harlequin.n.01', 'name': 'harlequin'}, {'id': 15987, 'synset': 'harmonizer.n.02', 'name': 'harmonizer'}, {'id': 15988, 'synset': 'hash_head.n.01', 'name': 'hash_head'}, {'id': 15989, 'synset': 'hatchet_man.n.01', 'name': 'hatchet_man'}, {'id': 15990, 'synset': 'hater.n.01', 'name': 'hater'}, {'id': 15991, 'synset': 'hatmaker.n.01', 'name': 'hatmaker'}, {'id': 15992, 'synset': 'headman.n.02', 'name': 'headman'}, {'id': 15993, 'synset': 'headmaster.n.01', 'name': 'headmaster'}, {'id': 15994, 'synset': 'head_nurse.n.01', 'name': 'head_nurse'}, {'id': 15995, 'synset': 'hearer.n.01', 'name': 'hearer'}, {'id': 15996, 'synset': 'heartbreaker.n.01', 'name': 'heartbreaker'}, {'id': 15997, 'synset': 'heathen.n.01', 'name': 'heathen'}, {'id': 15998, 'synset': 'heavyweight.n.02', 'name': 'heavyweight'}, {'id': 15999, 'synset': 'heavy.n.01', 'name': 'heavy'}, {'id': 16000, 'synset': 'heckler.n.01', 'name': 'heckler'}, {'id': 16001, 'synset': 'hedger.n.02', 'name': 'hedger'}, {'id': 16002, 'synset': 'hedger.n.01', 'name': 'hedger'}, {'id': 16003, 'synset': 'hedonist.n.01', 'name': 'hedonist'}, {'id': 16004, 'synset': 'heir.n.01', 'name': 'heir'}, {'id': 16005, 'synset': 'heir_apparent.n.01', 'name': 'heir_apparent'}, {'id': 16006, 'synset': 'heiress.n.01', 'name': 'heiress'}, {'id': 16007, 'synset': 'heir_presumptive.n.01', 'name': 'heir_presumptive'}, {'id': 16008, 'synset': 'hellion.n.01', 'name': 'hellion'}, {'id': 16009, 'synset': 'helmsman.n.01', 'name': 'helmsman'}, {'id': 16010, 'synset': 'hire.n.01', 'name': 'hire'}, {'id': 16011, 'synset': 'hematologist.n.01', 'name': 'hematologist'}, {'id': 16012, 'synset': 'hemiplegic.n.01', 'name': 'hemiplegic'}, {'id': 16013, 'synset': 'herald.n.01', 'name': 'herald'}, {'id': 16014, 'synset': 'herbalist.n.01', 'name': 'herbalist'}, {'id': 16015, 'synset': 'herder.n.02', 'name': 'herder'}, {'id': 16016, 'synset': 'hermaphrodite.n.01', 'name': 'hermaphrodite'}, {'id': 16017, 'synset': 'heroine.n.02', 'name': 'heroine'}, {'id': 16018, 'synset': 'heroin_addict.n.01', 'name': 'heroin_addict'}, {'id': 16019, 
'synset': 'hero_worshiper.n.01', 'name': 'hero_worshiper'}, {'id': 16020, 'synset': 'herr.n.01', 'name': 'Herr'}, {'id': 16021, 'synset': 'highbinder.n.01', 'name': 'highbinder'}, {'id': 16022, 'synset': 'highbrow.n.01', 'name': 'highbrow'}, {'id': 16023, 'synset': 'high_commissioner.n.01', 'name': 'high_commissioner'}, {'id': 16024, 'synset': 'highflier.n.01', 'name': 'highflier'}, {'id': 16025, 'synset': 'highlander.n.02', 'name': 'Highlander'}, {'id': 16026, 'synset': 'high-muck-a-muck.n.01', 'name': 'high-muck-a-muck'}, {'id': 16027, 'synset': 'high_priest.n.01', 'name': 'high_priest'}, {'id': 16028, 'synset': 'highjacker.n.01', 'name': 'highjacker'}, {'id': 16029, 'synset': 'hireling.n.01', 'name': 'hireling'}, {'id': 16030, 'synset': 'historian.n.01', 'name': 'historian'}, {'id': 16031, 'synset': 'hitchhiker.n.01', 'name': 'hitchhiker'}, {'id': 16032, 'synset': 'hitter.n.02', 'name': 'hitter'}, {'id': 16033, 'synset': 'hobbyist.n.01', 'name': 'hobbyist'}, {'id': 16034, 'synset': 'holdout.n.01', 'name': 'holdout'}, {'id': 16035, 'synset': 'holdover.n.01', 'name': 'holdover'}, {'id': 16036, 'synset': 'holdup_man.n.01', 'name': 'holdup_man'}, {'id': 16037, 'synset': 'homeboy.n.02', 'name': 'homeboy'}, {'id': 16038, 'synset': 'homeboy.n.01', 'name': 'homeboy'}, {'id': 16039, 'synset': 'home_buyer.n.01', 'name': 'home_buyer'}, {'id': 16040, 'synset': 'homegirl.n.01', 'name': 'homegirl'}, {'id': 16041, 'synset': 'homeless.n.01', 'name': 'homeless'}, {'id': 16042, 'synset': 'homeopath.n.01', 'name': 'homeopath'}, {'id': 16043, 'synset': 'honest_woman.n.01', 'name': 'honest_woman'}, {'id': 16044, 'synset': 'honor_guard.n.01', 'name': 'honor_guard'}, {'id': 16045, 'synset': 'hooker.n.05', 'name': 'hooker'}, {'id': 16046, 'synset': 'hoper.n.01', 'name': 'hoper'}, {'id': 16047, 'synset': 'hornist.n.01', 'name': 'hornist'}, {'id': 16048, 'synset': 'horseman.n.01', 'name': 'horseman'}, {'id': 16049, 'synset': 'horse_trader.n.01', 'name': 'horse_trader'}, {'id': 16050, 'synset': 'horsewoman.n.01', 'name': 'horsewoman'}, {'id': 16051, 'synset': 'horse_wrangler.n.01', 'name': 'horse_wrangler'}, {'id': 16052, 'synset': 'horticulturist.n.01', 'name': 'horticulturist'}, {'id': 16053, 'synset': 'hospital_chaplain.n.01', 'name': 'hospital_chaplain'}, {'id': 16054, 'synset': 'host.n.08', 'name': 'host'}, {'id': 16055, 'synset': 'host.n.01', 'name': 'host'}, {'id': 16056, 'synset': 'hostess.n.01', 'name': 'hostess'}, {'id': 16057, 'synset': 'hotelier.n.01', 'name': 'hotelier'}, {'id': 16058, 'synset': 'housekeeper.n.01', 'name': 'housekeeper'}, {'id': 16059, 'synset': 'housemaster.n.01', 'name': 'housemaster'}, {'id': 16060, 'synset': 'housemate.n.01', 'name': 'housemate'}, {'id': 16061, 'synset': 'house_physician.n.01', 'name': 'house_physician'}, {'id': 16062, 'synset': 'house_sitter.n.01', 'name': 'house_sitter'}, {'id': 16063, 'synset': 'housing_commissioner.n.01', 'name': 'housing_commissioner'}, {'id': 16064, 'synset': 'huckster.n.01', 'name': 'huckster'}, {'id': 16065, 'synset': 'hugger.n.01', 'name': 'hugger'}, {'id': 16066, 'synset': 'humanist.n.02', 'name': 'humanist'}, {'id': 16067, 'synset': 'humanitarian.n.01', 'name': 'humanitarian'}, {'id': 16068, 'synset': 'hunk.n.01', 'name': 'hunk'}, {'id': 16069, 'synset': 'huntress.n.01', 'name': 'huntress'}, {'id': 16070, 'synset': 'ex-husband.n.01', 'name': 'ex-husband'}, {'id': 16071, 'synset': 'hydrologist.n.01', 'name': 'hydrologist'}, {'id': 16072, 'synset': 'hyperope.n.01', 'name': 'hyperope'}, {'id': 16073, 'synset': 'hypertensive.n.01', 'name': 
'hypertensive'}, {'id': 16074, 'synset': 'hypnotist.n.01', 'name': 'hypnotist'}, {'id': 16075, 'synset': 'hypocrite.n.01', 'name': 'hypocrite'}, {'id': 16076, 'synset': 'iceman.n.01', 'name': 'iceman'}, {'id': 16077, 'synset': 'iconoclast.n.02', 'name': 'iconoclast'}, {'id': 16078, 'synset': 'ideologist.n.01', 'name': 'ideologist'}, {'id': 16079, 'synset': 'idol.n.02', 'name': 'idol'}, {'id': 16080, 'synset': 'idolizer.n.01', 'name': 'idolizer'}, {'id': 16081, 'synset': 'imam.n.01', 'name': 'imam'}, {'id': 16082, 'synset': 'imperialist.n.01', 'name': 'imperialist'}, {'id': 16083, 'synset': 'important_person.n.01', 'name': 'important_person'}, {'id': 16084, 'synset': 'inamorato.n.01', 'name': 'inamorato'}, {'id': 16085, 'synset': 'incumbent.n.01', 'name': 'incumbent'}, {'id': 16086, 'synset': 'incurable.n.01', 'name': 'incurable'}, {'id': 16087, 'synset': 'inductee.n.01', 'name': 'inductee'}, {'id': 16088, 'synset': 'industrialist.n.01', 'name': 'industrialist'}, {'id': 16089, 'synset': 'infanticide.n.01', 'name': 'infanticide'}, {'id': 16090, 'synset': 'inferior.n.01', 'name': 'inferior'}, {'id': 16091, 'synset': 'infernal.n.01', 'name': 'infernal'}, {'id': 16092, 'synset': 'infielder.n.01', 'name': 'infielder'}, {'id': 16093, 'synset': 'infiltrator.n.02', 'name': 'infiltrator'}, {'id': 16094, 'synset': 'informer.n.01', 'name': 'informer'}, {'id': 16095, 'synset': 'ingenue.n.02', 'name': 'ingenue'}, {'id': 16096, 'synset': 'ingenue.n.01', 'name': 'ingenue'}, {'id': 16097, 'synset': 'polymath.n.01', 'name': 'polymath'}, {'id': 16098, 'synset': 'in-law.n.01', 'name': 'in-law'}, {'id': 16099, 'synset': 'inquiry_agent.n.01', 'name': 'inquiry_agent'}, {'id': 16100, 'synset': 'inspector.n.01', 'name': 'inspector'}, {'id': 16101, 'synset': 'inspector_general.n.01', 'name': 'inspector_general'}, {'id': 16102, 'synset': 'instigator.n.02', 'name': 'instigator'}, {'id': 16103, 'synset': 'insurance_broker.n.01', 'name': 'insurance_broker'}, {'id': 16104, 'synset': 'insurgent.n.01', 'name': 'insurgent'}, {'id': 16105, 'synset': 'intelligence_analyst.n.01', 'name': 'intelligence_analyst'}, {'id': 16106, 'synset': 'interior_designer.n.01', 'name': 'interior_designer'}, {'id': 16107, 'synset': 'interlocutor.n.02', 'name': 'interlocutor'}, {'id': 16108, 'synset': 'interlocutor.n.01', 'name': 'interlocutor'}, {'id': 16109, 'synset': 'international_grandmaster.n.01', 'name': 'International_Grandmaster'}, {'id': 16110, 'synset': 'internationalist.n.02', 'name': 'internationalist'}, {'id': 16111, 'synset': 'internist.n.01', 'name': 'internist'}, {'id': 16112, 'synset': 'interpreter.n.01', 'name': 'interpreter'}, {'id': 16113, 'synset': 'interpreter.n.02', 'name': 'interpreter'}, {'id': 16114, 'synset': 'intervenor.n.01', 'name': 'intervenor'}, {'id': 16115, 'synset': 'introvert.n.01', 'name': 'introvert'}, {'id': 16116, 'synset': 'invader.n.01', 'name': 'invader'}, {'id': 16117, 'synset': 'invalidator.n.01', 'name': 'invalidator'}, {'id': 16118, 'synset': 'investigator.n.02', 'name': 'investigator'}, {'id': 16119, 'synset': 'investor.n.01', 'name': 'investor'}, {'id': 16120, 'synset': 'invigilator.n.01', 'name': 'invigilator'}, {'id': 16121, 'synset': 'irreligionist.n.01', 'name': 'irreligionist'}, {'id': 16122, 'synset': 'ivy_leaguer.n.01', 'name': 'Ivy_Leaguer'}, {'id': 16123, 'synset': 'jack_of_all_trades.n.01', 'name': 'Jack_of_all_trades'}, {'id': 16124, 'synset': 'jacksonian.n.01', 'name': 'Jacksonian'}, {'id': 16125, 'synset': 'jane_doe.n.01', 'name': 'Jane_Doe'}, {'id': 16126, 'synset': 
'janissary.n.01', 'name': 'janissary'}, {'id': 16127, 'synset': 'jat.n.01', 'name': 'Jat'}, {'id': 16128, 'synset': 'javanese.n.01', 'name': 'Javanese'}, {'id': 16129, 'synset': 'jekyll_and_hyde.n.01', 'name': 'Jekyll_and_Hyde'}, {'id': 16130, 'synset': 'jester.n.01', 'name': 'jester'}, {'id': 16131, 'synset': 'jesuit.n.01', 'name': 'Jesuit'}, {'id': 16132, 'synset': 'jezebel.n.02', 'name': 'jezebel'}, {'id': 16133, 'synset': 'jilt.n.01', 'name': 'jilt'}, {'id': 16134, 'synset': 'jobber.n.01', 'name': 'jobber'}, {'id': 16135, 'synset': 'job_candidate.n.01', 'name': 'job_candidate'}, {'id': 16136, 'synset': "job's_comforter.n.01", 'name': "Job's_comforter"}, {'id': 16137, 'synset': 'jockey.n.01', 'name': 'jockey'}, {'id': 16138, 'synset': 'john_doe.n.02', 'name': 'John_Doe'}, {'id': 16139, 'synset': 'journalist.n.01', 'name': 'journalist'}, {'id': 16140, 'synset': 'judge.n.01', 'name': 'judge'}, {'id': 16141, 'synset': 'judge_advocate.n.01', 'name': 'judge_advocate'}, {'id': 16142, 'synset': 'juggler.n.01', 'name': 'juggler'}, {'id': 16143, 'synset': 'jungian.n.01', 'name': 'Jungian'}, {'id': 16144, 'synset': 'junior.n.03', 'name': 'junior'}, {'id': 16145, 'synset': 'junior.n.02', 'name': 'junior'}, {'id': 16146, 'synset': 'junior.n.04', 'name': 'Junior'}, {'id': 16147, 'synset': 'junior_lightweight.n.01', 'name': 'junior_lightweight'}, {'id': 16148, 'synset': 'junior_middleweight.n.01', 'name': 'junior_middleweight'}, {'id': 16149, 'synset': 'jurist.n.01', 'name': 'jurist'}, {'id': 16150, 'synset': 'juror.n.01', 'name': 'juror'}, {'id': 16151, 'synset': 'justice_of_the_peace.n.01', 'name': 'justice_of_the_peace'}, {'id': 16152, 'synset': 'justiciar.n.01', 'name': 'justiciar'}, {'id': 16153, 'synset': 'kachina.n.01', 'name': 'kachina'}, {'id': 16154, 'synset': 'keyboardist.n.01', 'name': 'keyboardist'}, {'id': 16155, 'synset': 'khedive.n.01', 'name': 'Khedive'}, {'id': 16156, 'synset': 'kingmaker.n.02', 'name': 'kingmaker'}, {'id': 16157, 'synset': 'king.n.02', 'name': 'king'}, {'id': 16158, 'synset': "king's_counsel.n.01", 'name': "King's_Counsel"}, {'id': 16159, 'synset': 'counsel_to_the_crown.n.01', 'name': 'Counsel_to_the_Crown'}, {'id': 16160, 'synset': 'kin.n.01', 'name': 'kin'}, {'id': 16161, 'synset': 'enate.n.01', 'name': 'enate'}, {'id': 16162, 'synset': 'kink.n.03', 'name': 'kink'}, {'id': 16163, 'synset': 'kinswoman.n.01', 'name': 'kinswoman'}, {'id': 16164, 'synset': 'kisser.n.01', 'name': 'kisser'}, {'id': 16165, 'synset': 'kitchen_help.n.01', 'name': 'kitchen_help'}, {'id': 16166, 'synset': 'kitchen_police.n.01', 'name': 'kitchen_police'}, {'id': 16167, 'synset': 'klansman.n.01', 'name': 'Klansman'}, {'id': 16168, 'synset': 'kleptomaniac.n.01', 'name': 'kleptomaniac'}, {'id': 16169, 'synset': 'kneeler.n.01', 'name': 'kneeler'}, {'id': 16170, 'synset': 'knight.n.01', 'name': 'knight'}, {'id': 16171, 'synset': 'knocker.n.01', 'name': 'knocker'}, {'id': 16172, 'synset': 'knower.n.01', 'name': 'knower'}, {'id': 16173, 'synset': 'know-it-all.n.01', 'name': 'know-it-all'}, {'id': 16174, 'synset': 'kolkhoznik.n.01', 'name': 'kolkhoznik'}, {'id': 16175, 'synset': 'kshatriya.n.01', 'name': 'Kshatriya'}, {'id': 16176, 'synset': 'labor_coach.n.01', 'name': 'labor_coach'}, {'id': 16177, 'synset': 'laborer.n.01', 'name': 'laborer'}, {'id': 16178, 'synset': 'labourite.n.01', 'name': 'Labourite'}, {'id': 16179, 'synset': 'lady.n.01', 'name': 'lady'}, {'id': 16180, 'synset': 'lady-in-waiting.n.01', 'name': 'lady-in-waiting'}, {'id': 16181, 'synset': "lady's_maid.n.01", 'name': 
"lady's_maid"}, {'id': 16182, 'synset': 'lama.n.01', 'name': 'lama'}, {'id': 16183, 'synset': 'lamb.n.04', 'name': 'lamb'}, {'id': 16184, 'synset': 'lame_duck.n.01', 'name': 'lame_duck'}, {'id': 16185, 'synset': 'lamplighter.n.01', 'name': 'lamplighter'}, {'id': 16186, 'synset': 'land_agent.n.02', 'name': 'land_agent'}, {'id': 16187, 'synset': 'landgrave.n.01', 'name': 'landgrave'}, {'id': 16188, 'synset': 'landlubber.n.02', 'name': 'landlubber'}, {'id': 16189, 'synset': 'landlubber.n.01', 'name': 'landlubber'}, {'id': 16190, 'synset': 'landowner.n.01', 'name': 'landowner'}, {'id': 16191, 'synset': 'landscape_architect.n.01', 'name': 'landscape_architect'}, {'id': 16192, 'synset': 'langlaufer.n.01', 'name': 'langlaufer'}, {'id': 16193, 'synset': 'languisher.n.01', 'name': 'languisher'}, {'id': 16194, 'synset': 'lapidary.n.01', 'name': 'lapidary'}, {'id': 16195, 'synset': 'lass.n.01', 'name': 'lass'}, {'id': 16196, 'synset': 'latin.n.03', 'name': 'Latin'}, {'id': 16197, 'synset': 'latin.n.02', 'name': 'Latin'}, {'id': 16198, 'synset': 'latitudinarian.n.01', 'name': 'latitudinarian'}, {'id': 16199, 'synset': "jehovah's_witness.n.01", 'name': "Jehovah's_Witness"}, {'id': 16200, 'synset': 'law_agent.n.01', 'name': 'law_agent'}, {'id': 16201, 'synset': 'lawgiver.n.01', 'name': 'lawgiver'}, {'id': 16202, 'synset': 'lawman.n.01', 'name': 'lawman'}, {'id': 16203, 'synset': 'law_student.n.01', 'name': 'law_student'}, {'id': 16204, 'synset': 'lawyer.n.01', 'name': 'lawyer'}, {'id': 16205, 'synset': 'lay_reader.n.01', 'name': 'lay_reader'}, {'id': 16206, 'synset': 'lazybones.n.01', 'name': 'lazybones'}, {'id': 16207, 'synset': 'leaker.n.01', 'name': 'leaker'}, {'id': 16208, 'synset': 'leaseholder.n.01', 'name': 'leaseholder'}, {'id': 16209, 'synset': 'lector.n.02', 'name': 'lector'}, {'id': 16210, 'synset': 'lector.n.01', 'name': 'lector'}, {'id': 16211, 'synset': 'lecturer.n.02', 'name': 'lecturer'}, {'id': 16212, 'synset': 'left-hander.n.02', 'name': 'left-hander'}, {'id': 16213, 'synset': 'legal_representative.n.01', 'name': 'legal_representative'}, {'id': 16214, 'synset': 'legate.n.01', 'name': 'legate'}, {'id': 16215, 'synset': 'legatee.n.01', 'name': 'legatee'}, {'id': 16216, 'synset': 'legionnaire.n.02', 'name': 'legionnaire'}, {'id': 16217, 'synset': 'letterman.n.01', 'name': 'letterman'}, {'id': 16218, 'synset': 'liberator.n.01', 'name': 'liberator'}, {'id': 16219, 'synset': 'licenser.n.01', 'name': 'licenser'}, {'id': 16220, 'synset': 'licentiate.n.01', 'name': 'licentiate'}, {'id': 16221, 'synset': 'lieutenant.n.01', 'name': 'lieutenant'}, {'id': 16222, 'synset': 'lieutenant_colonel.n.01', 'name': 'lieutenant_colonel'}, {'id': 16223, 'synset': 'lieutenant_commander.n.01', 'name': 'lieutenant_commander'}, {'id': 16224, 'synset': 'lieutenant_junior_grade.n.01', 'name': 'lieutenant_junior_grade'}, {'id': 16225, 'synset': 'life.n.08', 'name': 'life'}, {'id': 16226, 'synset': 'lifeguard.n.01', 'name': 'lifeguard'}, {'id': 16227, 'synset': 'life_tenant.n.01', 'name': 'life_tenant'}, {'id': 16228, 'synset': 'light_flyweight.n.01', 'name': 'light_flyweight'}, {'id': 16229, 'synset': 'light_heavyweight.n.03', 'name': 'light_heavyweight'}, {'id': 16230, 'synset': 'light_heavyweight.n.01', 'name': 'light_heavyweight'}, {'id': 16231, 'synset': "light-o'-love.n.01", 'name': "light-o'-love"}, {'id': 16232, 'synset': 'lightweight.n.01', 'name': 'lightweight'}, {'id': 16233, 'synset': 'lightweight.n.04', 'name': 'lightweight'}, {'id': 16234, 'synset': 'lightweight.n.03', 'name': 'lightweight'}, {'id': 
16235, 'synset': 'lilliputian.n.01', 'name': 'lilliputian'}, {'id': 16236, 'synset': 'limnologist.n.01', 'name': 'limnologist'}, {'id': 16237, 'synset': 'lineman.n.01', 'name': 'lineman'}, {'id': 16238, 'synset': 'line_officer.n.01', 'name': 'line_officer'}, {'id': 16239, 'synset': 'lion-hunter.n.01', 'name': 'lion-hunter'}, {'id': 16240, 'synset': 'lisper.n.01', 'name': 'lisper'}, {'id': 16241, 'synset': 'lister.n.02', 'name': 'lister'}, {'id': 16242, 'synset': 'literary_critic.n.01', 'name': 'literary_critic'}, {'id': 16243, 'synset': 'literate.n.01', 'name': 'literate'}, {'id': 16244, 'synset': 'litigant.n.01', 'name': 'litigant'}, {'id': 16245, 'synset': 'litterer.n.01', 'name': 'litterer'}, {'id': 16246, 'synset': 'little_brother.n.01', 'name': 'little_brother'}, {'id': 16247, 'synset': 'little_sister.n.01', 'name': 'little_sister'}, {'id': 16248, 'synset': 'lobbyist.n.01', 'name': 'lobbyist'}, {'id': 16249, 'synset': 'locksmith.n.01', 'name': 'locksmith'}, {'id': 16250, 'synset': 'locum_tenens.n.01', 'name': 'locum_tenens'}, {'id': 16251, 'synset': 'lord.n.03', 'name': 'Lord'}, {'id': 16252, 'synset': 'loser.n.03', 'name': 'loser'}, {'id': 16253, 'synset': 'loser.n.01', 'name': 'loser'}, {'id': 16254, 'synset': 'failure.n.04', 'name': 'failure'}, {'id': 16255, 'synset': 'lothario.n.01', 'name': 'Lothario'}, {'id': 16256, 'synset': 'loudmouth.n.01', 'name': 'loudmouth'}, {'id': 16257, 'synset': 'lowerclassman.n.01', 'name': 'lowerclassman'}, {'id': 16258, 'synset': 'lowlander.n.01', 'name': 'Lowlander'}, {'id': 16259, 'synset': 'loyalist.n.01', 'name': 'loyalist'}, {'id': 16260, 'synset': 'luddite.n.01', 'name': 'Luddite'}, {'id': 16261, 'synset': 'lumberman.n.01', 'name': 'lumberman'}, {'id': 16262, 'synset': 'lumper.n.02', 'name': 'lumper'}, {'id': 16263, 'synset': 'bedlamite.n.01', 'name': 'bedlamite'}, {'id': 16264, 'synset': 'pyromaniac.n.01', 'name': 'pyromaniac'}, {'id': 16265, 'synset': 'lutist.n.01', 'name': 'lutist'}, {'id': 16266, 'synset': 'lutheran.n.01', 'name': 'Lutheran'}, {'id': 16267, 'synset': 'lyricist.n.01', 'name': 'lyricist'}, {'id': 16268, 'synset': 'macebearer.n.01', 'name': 'macebearer'}, {'id': 16269, 'synset': 'machinist.n.01', 'name': 'machinist'}, {'id': 16270, 'synset': 'madame.n.01', 'name': 'madame'}, {'id': 16271, 'synset': 'maenad.n.01', 'name': 'maenad'}, {'id': 16272, 'synset': 'maestro.n.01', 'name': 'maestro'}, {'id': 16273, 'synset': 'magdalen.n.01', 'name': 'magdalen'}, {'id': 16274, 'synset': 'magician.n.01', 'name': 'magician'}, {'id': 16275, 'synset': 'magus.n.01', 'name': 'magus'}, {'id': 16276, 'synset': 'maharani.n.01', 'name': 'maharani'}, {'id': 16277, 'synset': 'mahatma.n.01', 'name': 'mahatma'}, {'id': 16278, 'synset': 'maid.n.02', 'name': 'maid'}, {'id': 16279, 'synset': 'maid.n.01', 'name': 'maid'}, {'id': 16280, 'synset': 'major.n.01', 'name': 'major'}, {'id': 16281, 'synset': 'major.n.03', 'name': 'major'}, {'id': 16282, 'synset': 'major-domo.n.01', 'name': 'major-domo'}, {'id': 16283, 'synset': 'maker.n.01', 'name': 'maker'}, {'id': 16284, 'synset': 'malahini.n.01', 'name': 'malahini'}, {'id': 16285, 'synset': 'malcontent.n.01', 'name': 'malcontent'}, {'id': 16286, 'synset': 'malik.n.01', 'name': 'malik'}, {'id': 16287, 'synset': 'malingerer.n.01', 'name': 'malingerer'}, {'id': 16288, 'synset': 'malthusian.n.01', 'name': 'Malthusian'}, {'id': 16289, 'synset': 'adonis.n.01', 'name': 'adonis'}, {'id': 16290, 'synset': 'man.n.03', 'name': 'man'}, {'id': 16291, 'synset': 'man.n.05', 'name': 'man'}, {'id': 16292, 'synset': 
'manageress.n.01', 'name': 'manageress'}, {'id': 16293, 'synset': 'mandarin.n.03', 'name': 'mandarin'}, {'id': 16294, 'synset': 'maneuverer.n.01', 'name': 'maneuverer'}, {'id': 16295, 'synset': 'maniac.n.02', 'name': 'maniac'}, {'id': 16296, 'synset': 'manichaean.n.01', 'name': 'Manichaean'}, {'id': 16297, 'synset': 'manicurist.n.01', 'name': 'manicurist'}, {'id': 16298, 'synset': 'manipulator.n.02', 'name': 'manipulator'}, {'id': 16299, 'synset': 'man-at-arms.n.01', 'name': 'man-at-arms'}, {'id': 16300, 'synset': 'man_of_action.n.01', 'name': 'man_of_action'}, {'id': 16301, 'synset': 'man_of_letters.n.01', 'name': 'man_of_letters'}, {'id': 16302, 'synset': 'manufacturer.n.02', 'name': 'manufacturer'}, {'id': 16303, 'synset': 'marcher.n.02', 'name': 'marcher'}, {'id': 16304, 'synset': 'marchioness.n.02', 'name': 'marchioness'}, {'id': 16305, 'synset': 'margrave.n.02', 'name': 'margrave'}, {'id': 16306, 'synset': 'margrave.n.01', 'name': 'margrave'}, {'id': 16307, 'synset': 'marine.n.01', 'name': 'Marine'}, {'id': 16308, 'synset': 'marquess.n.02', 'name': 'marquess'}, {'id': 16309, 'synset': 'marquis.n.02', 'name': 'marquis'}, {'id': 16310, 'synset': 'marshal.n.02', 'name': 'marshal'}, {'id': 16311, 'synset': 'martinet.n.01', 'name': 'martinet'}, {'id': 16312, 'synset': 'masochist.n.01', 'name': 'masochist'}, {'id': 16313, 'synset': 'mason.n.04', 'name': 'mason'}, {'id': 16314, 'synset': 'masquerader.n.01', 'name': 'masquerader'}, {'id': 16315, 'synset': 'masseur.n.01', 'name': 'masseur'}, {'id': 16316, 'synset': 'masseuse.n.01', 'name': 'masseuse'}, {'id': 16317, 'synset': 'master.n.04', 'name': 'master'}, {'id': 16318, 'synset': 'master.n.07', 'name': 'master'}, {'id': 16319, 'synset': 'master-at-arms.n.01', 'name': 'master-at-arms'}, {'id': 16320, 'synset': 'master_of_ceremonies.n.01', 'name': 'master_of_ceremonies'}, {'id': 16321, 'synset': 'masturbator.n.01', 'name': 'masturbator'}, {'id': 16322, 'synset': 'matchmaker.n.01', 'name': 'matchmaker'}, {'id': 16323, 'synset': 'mate.n.01', 'name': 'mate'}, {'id': 16324, 'synset': 'mate.n.08', 'name': 'mate'}, {'id': 16325, 'synset': 'mate.n.03', 'name': 'mate'}, {'id': 16326, 'synset': 'mater.n.01', 'name': 'mater'}, {'id': 16327, 'synset': 'material.n.05', 'name': 'material'}, {'id': 16328, 'synset': 'materialist.n.02', 'name': 'materialist'}, {'id': 16329, 'synset': 'matriarch.n.01', 'name': 'matriarch'}, {'id': 16330, 'synset': 'matriarch.n.02', 'name': 'matriarch'}, {'id': 16331, 'synset': 'matriculate.n.01', 'name': 'matriculate'}, {'id': 16332, 'synset': 'matron.n.01', 'name': 'matron'}, {'id': 16333, 'synset': 'mayor.n.01', 'name': 'mayor'}, {'id': 16334, 'synset': 'mayoress.n.01', 'name': 'mayoress'}, {'id': 16335, 'synset': 'mechanical_engineer.n.01', 'name': 'mechanical_engineer'}, {'id': 16336, 'synset': 'medalist.n.02', 'name': 'medalist'}, {'id': 16337, 'synset': 'medical_officer.n.01', 'name': 'medical_officer'}, {'id': 16338, 'synset': 'medical_practitioner.n.01', 'name': 'medical_practitioner'}, {'id': 16339, 'synset': 'medical_scientist.n.01', 'name': 'medical_scientist'}, {'id': 16340, 'synset': 'medium.n.09', 'name': 'medium'}, {'id': 16341, 'synset': 'megalomaniac.n.01', 'name': 'megalomaniac'}, {'id': 16342, 'synset': 'melancholic.n.01', 'name': 'melancholic'}, {'id': 16343, 'synset': 'melkite.n.01', 'name': 'Melkite'}, {'id': 16344, 'synset': 'melter.n.01', 'name': 'melter'}, {'id': 16345, 'synset': 'nonmember.n.01', 'name': 'nonmember'}, {'id': 16346, 'synset': 'board_member.n.01', 'name': 'board_member'}, {'id': 
16347, 'synset': 'clansman.n.01', 'name': 'clansman'}, {'id': 16348, 'synset': 'memorizer.n.01', 'name': 'memorizer'}, {'id': 16349, 'synset': 'mendelian.n.01', 'name': 'Mendelian'}, {'id': 16350, 'synset': 'mender.n.01', 'name': 'mender'}, {'id': 16351, 'synset': 'mesoamerican.n.01', 'name': 'Mesoamerican'}, {'id': 16352, 'synset': 'messmate.n.01', 'name': 'messmate'}, {'id': 16353, 'synset': 'mestiza.n.01', 'name': 'mestiza'}, {'id': 16354, 'synset': 'meteorologist.n.01', 'name': 'meteorologist'}, {'id': 16355, 'synset': 'meter_maid.n.01', 'name': 'meter_maid'}, {'id': 16356, 'synset': 'methodist.n.01', 'name': 'Methodist'}, {'id': 16357, 'synset': 'metis.n.01', 'name': 'Metis'}, {'id': 16358, 'synset': 'metropolitan.n.01', 'name': 'metropolitan'}, {'id': 16359, 'synset': 'mezzo-soprano.n.01', 'name': 'mezzo-soprano'}, {'id': 16360, 'synset': 'microeconomist.n.01', 'name': 'microeconomist'}, {'id': 16361, 'synset': 'middle-aged_man.n.01', 'name': 'middle-aged_man'}, {'id': 16362, 'synset': 'middlebrow.n.01', 'name': 'middlebrow'}, {'id': 16363, 'synset': 'middleweight.n.01', 'name': 'middleweight'}, {'id': 16364, 'synset': 'midwife.n.01', 'name': 'midwife'}, {'id': 16365, 'synset': 'mikado.n.01', 'name': 'mikado'}, {'id': 16366, 'synset': 'milanese.n.01', 'name': 'Milanese'}, {'id': 16367, 'synset': 'miler.n.02', 'name': 'miler'}, {'id': 16368, 'synset': 'miles_gloriosus.n.01', 'name': 'miles_gloriosus'}, {'id': 16369, 'synset': 'military_attache.n.01', 'name': 'military_attache'}, {'id': 16370, 'synset': 'military_chaplain.n.01', 'name': 'military_chaplain'}, {'id': 16371, 'synset': 'military_leader.n.01', 'name': 'military_leader'}, {'id': 16372, 'synset': 'military_officer.n.01', 'name': 'military_officer'}, {'id': 16373, 'synset': 'military_policeman.n.01', 'name': 'military_policeman'}, {'id': 16374, 'synset': 'mill_agent.n.01', 'name': 'mill_agent'}, {'id': 16375, 'synset': 'mill-hand.n.01', 'name': 'mill-hand'}, {'id': 16376, 'synset': 'millionairess.n.01', 'name': 'millionairess'}, {'id': 16377, 'synset': 'millwright.n.01', 'name': 'millwright'}, {'id': 16378, 'synset': 'minder.n.01', 'name': 'minder'}, {'id': 16379, 'synset': 'mining_engineer.n.01', 'name': 'mining_engineer'}, {'id': 16380, 'synset': 'minister.n.02', 'name': 'minister'}, {'id': 16381, 'synset': 'ministrant.n.01', 'name': 'ministrant'}, {'id': 16382, 'synset': 'minor_leaguer.n.01', 'name': 'minor_leaguer'}, {'id': 16383, 'synset': 'minuteman.n.01', 'name': 'Minuteman'}, {'id': 16384, 'synset': 'misanthrope.n.01', 'name': 'misanthrope'}, {'id': 16385, 'synset': 'misfit.n.01', 'name': 'misfit'}, {'id': 16386, 'synset': 'mistress.n.03', 'name': 'mistress'}, {'id': 16387, 'synset': 'mistress.n.01', 'name': 'mistress'}, {'id': 16388, 'synset': 'mixed-blood.n.01', 'name': 'mixed-blood'}, {'id': 16389, 'synset': 'model.n.03', 'name': 'model'}, {'id': 16390, 'synset': 'class_act.n.01', 'name': 'class_act'}, {'id': 16391, 'synset': 'modeler.n.01', 'name': 'modeler'}, {'id': 16392, 'synset': 'modifier.n.02', 'name': 'modifier'}, {'id': 16393, 'synset': 'molecular_biologist.n.01', 'name': 'molecular_biologist'}, {'id': 16394, 'synset': 'monegasque.n.01', 'name': 'Monegasque'}, {'id': 16395, 'synset': 'monetarist.n.01', 'name': 'monetarist'}, {'id': 16396, 'synset': 'moneygrubber.n.01', 'name': 'moneygrubber'}, {'id': 16397, 'synset': 'moneymaker.n.01', 'name': 'moneymaker'}, {'id': 16398, 'synset': 'mongoloid.n.01', 'name': 'Mongoloid'}, {'id': 16399, 'synset': 'monolingual.n.01', 'name': 'monolingual'}, {'id': 16400, 
'synset': 'monologist.n.01', 'name': 'monologist'}, {'id': 16401, 'synset': 'moonlighter.n.01', 'name': 'moonlighter'}, {'id': 16402, 'synset': 'moralist.n.01', 'name': 'moralist'}, {'id': 16403, 'synset': 'morosoph.n.01', 'name': 'morosoph'}, {'id': 16404, 'synset': 'morris_dancer.n.01', 'name': 'morris_dancer'}, {'id': 16405, 'synset': 'mortal_enemy.n.01', 'name': 'mortal_enemy'}, {'id': 16406, 'synset': 'mortgagee.n.01', 'name': 'mortgagee'}, {'id': 16407, 'synset': 'mortician.n.01', 'name': 'mortician'}, {'id': 16408, 'synset': 'moss-trooper.n.01', 'name': 'moss-trooper'}, {'id': 16409, 'synset': 'mother.n.01', 'name': 'mother'}, {'id': 16410, 'synset': 'mother.n.04', 'name': 'mother'}, {'id': 16411, 'synset': 'mother.n.03', 'name': 'mother'}, {'id': 16412, 'synset': 'mother_figure.n.01', 'name': 'mother_figure'}, {'id': 16413, 'synset': 'mother_hen.n.01', 'name': 'mother_hen'}, {'id': 16414, 'synset': 'mother-in-law.n.01', 'name': 'mother-in-law'}, {'id': 16415, 'synset': "mother's_boy.n.01", 'name': "mother's_boy"}, {'id': 16416, 'synset': "mother's_daughter.n.01", 'name': "mother's_daughter"}, {'id': 16417, 'synset': 'motorcycle_cop.n.01', 'name': 'motorcycle_cop'}, {'id': 16418, 'synset': 'motorcyclist.n.01', 'name': 'motorcyclist'}, {'id': 16419, 'synset': 'mound_builder.n.01', 'name': 'Mound_Builder'}, {'id': 16420, 'synset': 'mountebank.n.01', 'name': 'mountebank'}, {'id': 16421, 'synset': 'mourner.n.01', 'name': 'mourner'}, {'id': 16422, 'synset': 'mouthpiece.n.03', 'name': 'mouthpiece'}, {'id': 16423, 'synset': 'mover.n.03', 'name': 'mover'}, {'id': 16424, 'synset': 'moviegoer.n.01', 'name': 'moviegoer'}, {'id': 16425, 'synset': 'muffin_man.n.01', 'name': 'muffin_man'}, {'id': 16426, 'synset': 'mugwump.n.02', 'name': 'mugwump'}, {'id': 16427, 'synset': 'mullah.n.01', 'name': 'Mullah'}, {'id': 16428, 'synset': 'muncher.n.01', 'name': 'muncher'}, {'id': 16429, 'synset': 'murderess.n.01', 'name': 'murderess'}, {'id': 16430, 'synset': 'murder_suspect.n.01', 'name': 'murder_suspect'}, {'id': 16431, 'synset': 'musher.n.01', 'name': 'musher'}, {'id': 16432, 'synset': 'musician.n.01', 'name': 'musician'}, {'id': 16433, 'synset': 'musicologist.n.01', 'name': 'musicologist'}, {'id': 16434, 'synset': 'music_teacher.n.01', 'name': 'music_teacher'}, {'id': 16435, 'synset': 'musketeer.n.01', 'name': 'musketeer'}, {'id': 16436, 'synset': 'muslimah.n.01', 'name': 'Muslimah'}, {'id': 16437, 'synset': 'mutilator.n.01', 'name': 'mutilator'}, {'id': 16438, 'synset': 'mutineer.n.01', 'name': 'mutineer'}, {'id': 16439, 'synset': 'mute.n.01', 'name': 'mute'}, {'id': 16440, 'synset': 'mutterer.n.01', 'name': 'mutterer'}, {'id': 16441, 'synset': 'muzzler.n.01', 'name': 'muzzler'}, {'id': 16442, 'synset': 'mycenaen.n.01', 'name': 'Mycenaen'}, {'id': 16443, 'synset': 'mycologist.n.01', 'name': 'mycologist'}, {'id': 16444, 'synset': 'myope.n.01', 'name': 'myope'}, {'id': 16445, 'synset': 'myrmidon.n.01', 'name': 'myrmidon'}, {'id': 16446, 'synset': 'mystic.n.01', 'name': 'mystic'}, {'id': 16447, 'synset': 'mythologist.n.01', 'name': 'mythologist'}, {'id': 16448, 'synset': 'naif.n.01', 'name': 'naif'}, {'id': 16449, 'synset': 'nailer.n.01', 'name': 'nailer'}, {'id': 16450, 'synset': 'namby-pamby.n.01', 'name': 'namby-pamby'}, {'id': 16451, 'synset': 'name_dropper.n.01', 'name': 'name_dropper'}, {'id': 16452, 'synset': 'namer.n.01', 'name': 'namer'}, {'id': 16453, 'synset': 'nan.n.01', 'name': 'nan'}, {'id': 16454, 'synset': 'nanny.n.01', 'name': 'nanny'}, {'id': 16455, 'synset': 'narc.n.01', 'name': 
'narc'}, {'id': 16456, 'synset': 'narcissist.n.01', 'name': 'narcissist'}, {'id': 16457, 'synset': 'nark.n.01', 'name': 'nark'}, {'id': 16458, 'synset': 'nationalist.n.02', 'name': 'nationalist'}, {'id': 16459, 'synset': 'nautch_girl.n.01', 'name': 'nautch_girl'}, {'id': 16460, 'synset': 'naval_commander.n.01', 'name': 'naval_commander'}, {'id': 16461, 'synset': 'navy_seal.n.01', 'name': 'Navy_SEAL'}, {'id': 16462, 'synset': 'obstructionist.n.01', 'name': 'obstructionist'}, {'id': 16463, 'synset': 'nazarene.n.02', 'name': 'Nazarene'}, {'id': 16464, 'synset': 'nazarene.n.01', 'name': 'Nazarene'}, {'id': 16465, 'synset': 'nazi.n.01', 'name': 'Nazi'}, {'id': 16466, 'synset': 'nebbish.n.01', 'name': 'nebbish'}, {'id': 16467, 'synset': 'necker.n.01', 'name': 'necker'}, {'id': 16468, 'synset': 'neonate.n.01', 'name': 'neonate'}, {'id': 16469, 'synset': 'nephew.n.01', 'name': 'nephew'}, {'id': 16470, 'synset': 'neurobiologist.n.01', 'name': 'neurobiologist'}, {'id': 16471, 'synset': 'neurologist.n.01', 'name': 'neurologist'}, {'id': 16472, 'synset': 'neurosurgeon.n.01', 'name': 'neurosurgeon'}, {'id': 16473, 'synset': 'neutral.n.01', 'name': 'neutral'}, {'id': 16474, 'synset': 'neutralist.n.01', 'name': 'neutralist'}, {'id': 16475, 'synset': 'newcomer.n.01', 'name': 'newcomer'}, {'id': 16476, 'synset': 'newcomer.n.02', 'name': 'newcomer'}, {'id': 16477, 'synset': 'new_dealer.n.01', 'name': 'New_Dealer'}, {'id': 16478, 'synset': 'newspaper_editor.n.01', 'name': 'newspaper_editor'}, {'id': 16479, 'synset': 'newsreader.n.01', 'name': 'newsreader'}, {'id': 16480, 'synset': 'newtonian.n.01', 'name': 'Newtonian'}, {'id': 16481, 'synset': 'niece.n.01', 'name': 'niece'}, {'id': 16482, 'synset': 'niggard.n.01', 'name': 'niggard'}, {'id': 16483, 'synset': 'night_porter.n.01', 'name': 'night_porter'}, {'id': 16484, 'synset': 'night_rider.n.01', 'name': 'night_rider'}, {'id': 16485, 'synset': 'nimby.n.01', 'name': 'NIMBY'}, {'id': 16486, 'synset': 'niqaabi.n.01', 'name': 'niqaabi'}, {'id': 16487, 'synset': 'nitpicker.n.01', 'name': 'nitpicker'}, {'id': 16488, 'synset': 'nobelist.n.01', 'name': 'Nobelist'}, {'id': 16489, 'synset': 'noc.n.01', 'name': 'NOC'}, {'id': 16490, 'synset': 'noncandidate.n.01', 'name': 'noncandidate'}, {'id': 16491, 'synset': 'noncommissioned_officer.n.01', 'name': 'noncommissioned_officer'}, {'id': 16492, 'synset': 'nondescript.n.01', 'name': 'nondescript'}, {'id': 16493, 'synset': 'nondriver.n.01', 'name': 'nondriver'}, {'id': 16494, 'synset': 'nonparticipant.n.01', 'name': 'nonparticipant'}, {'id': 16495, 'synset': 'nonperson.n.01', 'name': 'nonperson'}, {'id': 16496, 'synset': 'nonresident.n.01', 'name': 'nonresident'}, {'id': 16497, 'synset': 'nonsmoker.n.01', 'name': 'nonsmoker'}, {'id': 16498, 'synset': 'northern_baptist.n.01', 'name': 'Northern_Baptist'}, {'id': 16499, 'synset': 'noticer.n.01', 'name': 'noticer'}, {'id': 16500, 'synset': 'novelist.n.01', 'name': 'novelist'}, {'id': 16501, 'synset': 'novitiate.n.02', 'name': 'novitiate'}, {'id': 16502, 'synset': 'nuclear_chemist.n.01', 'name': 'nuclear_chemist'}, {'id': 16503, 'synset': 'nudger.n.01', 'name': 'nudger'}, {'id': 16504, 'synset': 'nullipara.n.01', 'name': 'nullipara'}, {'id': 16505, 'synset': 'number_theorist.n.01', 'name': 'number_theorist'}, {'id': 16506, 'synset': 'nurse.n.01', 'name': 'nurse'}, {'id': 16507, 'synset': 'nursling.n.01', 'name': 'nursling'}, {'id': 16508, 'synset': 'nymph.n.03', 'name': 'nymph'}, {'id': 16509, 'synset': 'nymphet.n.01', 'name': 'nymphet'}, {'id': 16510, 'synset': 
'nympholept.n.01', 'name': 'nympholept'}, {'id': 16511, 'synset': 'nymphomaniac.n.01', 'name': 'nymphomaniac'}, {'id': 16512, 'synset': 'oarswoman.n.01', 'name': 'oarswoman'}, {'id': 16513, 'synset': 'oboist.n.01', 'name': 'oboist'}, {'id': 16514, 'synset': 'obscurantist.n.01', 'name': 'obscurantist'}, {'id': 16515, 'synset': 'observer.n.02', 'name': 'observer'}, {'id': 16516, 'synset': 'obstetrician.n.01', 'name': 'obstetrician'}, {'id': 16517, 'synset': 'occupier.n.02', 'name': 'occupier'}, {'id': 16518, 'synset': 'occultist.n.01', 'name': 'occultist'}, {'id': 16519, 'synset': 'wine_lover.n.01', 'name': 'wine_lover'}, {'id': 16520, 'synset': 'offerer.n.01', 'name': 'offerer'}, {'id': 16521, 'synset': 'office-bearer.n.01', 'name': 'office-bearer'}, {'id': 16522, 'synset': 'office_boy.n.01', 'name': 'office_boy'}, {'id': 16523, 'synset': 'officeholder.n.01', 'name': 'officeholder'}, {'id': 16524, 'synset': 'officiant.n.01', 'name': 'officiant'}, {'id': 16525, 'synset': 'federal.n.02', 'name': 'Federal'}, {'id': 16526, 'synset': 'oilman.n.02', 'name': 'oilman'}, {'id': 16527, 'synset': 'oil_tycoon.n.01', 'name': 'oil_tycoon'}, {'id': 16528, 'synset': 'old-age_pensioner.n.01', 'name': 'old-age_pensioner'}, {'id': 16529, 'synset': 'old_boy.n.02', 'name': 'old_boy'}, {'id': 16530, 'synset': 'old_lady.n.01', 'name': 'old_lady'}, {'id': 16531, 'synset': 'old_man.n.03', 'name': 'old_man'}, {'id': 16532, 'synset': 'oldster.n.01', 'name': 'oldster'}, {'id': 16533, 'synset': 'old-timer.n.02', 'name': 'old-timer'}, {'id': 16534, 'synset': 'old_woman.n.01', 'name': 'old_woman'}, {'id': 16535, 'synset': 'oligarch.n.01', 'name': 'oligarch'}, {'id': 16536, 'synset': 'olympian.n.01', 'name': 'Olympian'}, {'id': 16537, 'synset': 'omnivore.n.01', 'name': 'omnivore'}, {'id': 16538, 'synset': 'oncologist.n.01', 'name': 'oncologist'}, {'id': 16539, 'synset': 'onlooker.n.01', 'name': 'onlooker'}, {'id': 16540, 'synset': 'onomancer.n.01', 'name': 'onomancer'}, {'id': 16541, 'synset': 'operator.n.03', 'name': 'operator'}, {'id': 16542, 'synset': 'opportunist.n.01', 'name': 'opportunist'}, {'id': 16543, 'synset': 'optimist.n.01', 'name': 'optimist'}, {'id': 16544, 'synset': 'orangeman.n.01', 'name': 'Orangeman'}, {'id': 16545, 'synset': 'orator.n.01', 'name': 'orator'}, {'id': 16546, 'synset': 'orderly.n.02', 'name': 'orderly'}, {'id': 16547, 'synset': 'orderly.n.01', 'name': 'orderly'}, {'id': 16548, 'synset': 'orderly_sergeant.n.01', 'name': 'orderly_sergeant'}, {'id': 16549, 'synset': 'ordinand.n.01', 'name': 'ordinand'}, {'id': 16550, 'synset': 'ordinary.n.03', 'name': 'ordinary'}, {'id': 16551, 'synset': 'organ-grinder.n.01', 'name': 'organ-grinder'}, {'id': 16552, 'synset': 'organist.n.01', 'name': 'organist'}, {'id': 16553, 'synset': 'organization_man.n.01', 'name': 'organization_man'}, {'id': 16554, 'synset': 'organizer.n.01', 'name': 'organizer'}, {'id': 16555, 'synset': 'organizer.n.02', 'name': 'organizer'}, {'id': 16556, 'synset': 'originator.n.01', 'name': 'originator'}, {'id': 16557, 'synset': 'ornithologist.n.01', 'name': 'ornithologist'}, {'id': 16558, 'synset': 'orphan.n.01', 'name': 'orphan'}, {'id': 16559, 'synset': 'orphan.n.02', 'name': 'orphan'}, {'id': 16560, 'synset': 'osteopath.n.01', 'name': 'osteopath'}, {'id': 16561, 'synset': 'out-and-outer.n.01', 'name': 'out-and-outer'}, {'id': 16562, 'synset': 'outdoorswoman.n.01', 'name': 'outdoorswoman'}, {'id': 16563, 'synset': 'outfielder.n.02', 'name': 'outfielder'}, {'id': 16564, 'synset': 'outfielder.n.01', 'name': 'outfielder'}, {'id': 
16565, 'synset': 'right_fielder.n.01', 'name': 'right_fielder'}, {'id': 16566, 'synset': 'right-handed_pitcher.n.01', 'name': 'right-handed_pitcher'}, {'id': 16567, 'synset': 'outlier.n.01', 'name': 'outlier'}, {'id': 16568, 'synset': 'owner-occupier.n.01', 'name': 'owner-occupier'}, {'id': 16569, 'synset': 'oyabun.n.01', 'name': 'oyabun'}, {'id': 16570, 'synset': 'packrat.n.01', 'name': 'packrat'}, {'id': 16571, 'synset': 'padrone.n.02', 'name': 'padrone'}, {'id': 16572, 'synset': 'padrone.n.01', 'name': 'padrone'}, {'id': 16573, 'synset': 'page.n.04', 'name': 'page'}, {'id': 16574, 'synset': 'painter.n.02', 'name': 'painter'}, {'id': 16575, 'synset': 'paleo-american.n.01', 'name': 'Paleo-American'}, {'id': 16576, 'synset': 'paleontologist.n.01', 'name': 'paleontologist'}, {'id': 16577, 'synset': 'pallbearer.n.01', 'name': 'pallbearer'}, {'id': 16578, 'synset': 'palmist.n.01', 'name': 'palmist'}, {'id': 16579, 'synset': 'pamperer.n.01', 'name': 'pamperer'}, {'id': 16580, 'synset': 'panchen_lama.n.01', 'name': 'Panchen_Lama'}, {'id': 16581, 'synset': 'panelist.n.01', 'name': 'panelist'}, {'id': 16582, 'synset': 'panhandler.n.01', 'name': 'panhandler'}, {'id': 16583, 'synset': 'paparazzo.n.01', 'name': 'paparazzo'}, {'id': 16584, 'synset': 'paperboy.n.01', 'name': 'paperboy'}, {'id': 16585, 'synset': 'paperhanger.n.02', 'name': 'paperhanger'}, {'id': 16586, 'synset': 'paperhanger.n.01', 'name': 'paperhanger'}, {'id': 16587, 'synset': 'papoose.n.01', 'name': 'papoose'}, {'id': 16588, 'synset': 'pardoner.n.02', 'name': 'pardoner'}, {'id': 16589, 'synset': 'paretic.n.01', 'name': 'paretic'}, {'id': 16590, 'synset': 'parishioner.n.01', 'name': 'parishioner'}, {'id': 16591, 'synset': 'park_commissioner.n.01', 'name': 'park_commissioner'}, {'id': 16592, 'synset': 'parliamentarian.n.01', 'name': 'Parliamentarian'}, {'id': 16593, 'synset': 'parliamentary_agent.n.01', 'name': 'parliamentary_agent'}, {'id': 16594, 'synset': 'parodist.n.01', 'name': 'parodist'}, {'id': 16595, 'synset': 'parricide.n.01', 'name': 'parricide'}, {'id': 16596, 'synset': 'parrot.n.02', 'name': 'parrot'}, {'id': 16597, 'synset': 'partaker.n.01', 'name': 'partaker'}, {'id': 16598, 'synset': 'part-timer.n.01', 'name': 'part-timer'}, {'id': 16599, 'synset': 'party.n.05', 'name': 'party'}, {'id': 16600, 'synset': 'party_man.n.01', 'name': 'party_man'}, {'id': 16601, 'synset': 'passenger.n.01', 'name': 'passenger'}, {'id': 16602, 'synset': 'passer.n.03', 'name': 'passer'}, {'id': 16603, 'synset': 'paster.n.01', 'name': 'paster'}, {'id': 16604, 'synset': 'pater.n.01', 'name': 'pater'}, {'id': 16605, 'synset': 'patient.n.01', 'name': 'patient'}, {'id': 16606, 'synset': 'patriarch.n.04', 'name': 'patriarch'}, {'id': 16607, 'synset': 'patriarch.n.03', 'name': 'patriarch'}, {'id': 16608, 'synset': 'patriarch.n.02', 'name': 'patriarch'}, {'id': 16609, 'synset': 'patriot.n.01', 'name': 'patriot'}, {'id': 16610, 'synset': 'patron.n.03', 'name': 'patron'}, {'id': 16611, 'synset': 'patternmaker.n.01', 'name': 'patternmaker'}, {'id': 16612, 'synset': 'pawnbroker.n.01', 'name': 'pawnbroker'}, {'id': 16613, 'synset': 'payer.n.01', 'name': 'payer'}, {'id': 16614, 'synset': 'peacekeeper.n.01', 'name': 'peacekeeper'}, {'id': 16615, 'synset': 'peasant.n.02', 'name': 'peasant'}, {'id': 16616, 'synset': 'pedant.n.01', 'name': 'pedant'}, {'id': 16617, 'synset': 'peddler.n.01', 'name': 'peddler'}, {'id': 16618, 'synset': 'pederast.n.01', 'name': 'pederast'}, {'id': 16619, 'synset': 'penologist.n.01', 'name': 'penologist'}, {'id': 16620, 'synset': 
'pentathlete.n.01', 'name': 'pentathlete'}, {'id': 16621, 'synset': 'pentecostal.n.01', 'name': 'Pentecostal'}, {'id': 16622, 'synset': 'percussionist.n.01', 'name': 'percussionist'}, {'id': 16623, 'synset': 'periodontist.n.01', 'name': 'periodontist'}, {'id': 16624, 'synset': 'peshmerga.n.01', 'name': 'peshmerga'}, {'id': 16625, 'synset': 'personality.n.02', 'name': 'personality'}, {'id': 16626, 'synset': 'personal_representative.n.01', 'name': 'personal_representative'}, {'id': 16627, 'synset': 'personage.n.01', 'name': 'personage'}, {'id': 16628, 'synset': 'persona_grata.n.01', 'name': 'persona_grata'}, {'id': 16629, 'synset': 'persona_non_grata.n.01', 'name': 'persona_non_grata'}, {'id': 16630, 'synset': 'personification.n.01', 'name': 'personification'}, {'id': 16631, 'synset': 'perspirer.n.01', 'name': 'perspirer'}, {'id': 16632, 'synset': 'pervert.n.01', 'name': 'pervert'}, {'id': 16633, 'synset': 'pessimist.n.01', 'name': 'pessimist'}, {'id': 16634, 'synset': 'pest.n.03', 'name': 'pest'}, {'id': 16635, 'synset': 'peter_pan.n.01', 'name': 'Peter_Pan'}, {'id': 16636, 'synset': 'petitioner.n.01', 'name': 'petitioner'}, {'id': 16637, 'synset': 'petit_juror.n.01', 'name': 'petit_juror'}, {'id': 16638, 'synset': 'pet_sitter.n.01', 'name': 'pet_sitter'}, {'id': 16639, 'synset': 'petter.n.01', 'name': 'petter'}, {'id': 16640, 'synset': 'pharaoh.n.01', 'name': 'Pharaoh'}, {'id': 16641, 'synset': 'pharmacist.n.01', 'name': 'pharmacist'}, {'id': 16642, 'synset': 'philanthropist.n.01', 'name': 'philanthropist'}, {'id': 16643, 'synset': 'philatelist.n.01', 'name': 'philatelist'}, {'id': 16644, 'synset': 'philosopher.n.02', 'name': 'philosopher'}, {'id': 16645, 'synset': 'phonetician.n.01', 'name': 'phonetician'}, {'id': 16646, 'synset': 'phonologist.n.01', 'name': 'phonologist'}, {'id': 16647, 'synset': 'photojournalist.n.01', 'name': 'photojournalist'}, {'id': 16648, 'synset': 'photometrist.n.01', 'name': 'photometrist'}, {'id': 16649, 'synset': 'physical_therapist.n.01', 'name': 'physical_therapist'}, {'id': 16650, 'synset': 'physicist.n.01', 'name': 'physicist'}, {'id': 16651, 'synset': 'piano_maker.n.01', 'name': 'piano_maker'}, {'id': 16652, 'synset': 'picker.n.01', 'name': 'picker'}, {'id': 16653, 'synset': 'picnicker.n.01', 'name': 'picnicker'}, {'id': 16654, 'synset': 'pilgrim.n.01', 'name': 'pilgrim'}, {'id': 16655, 'synset': 'pill.n.03', 'name': 'pill'}, {'id': 16656, 'synset': 'pillar.n.03', 'name': 'pillar'}, {'id': 16657, 'synset': 'pill_head.n.01', 'name': 'pill_head'}, {'id': 16658, 'synset': 'pilot.n.02', 'name': 'pilot'}, {'id': 16659, 'synset': 'piltdown_man.n.01', 'name': 'Piltdown_man'}, {'id': 16660, 'synset': 'pimp.n.01', 'name': 'pimp'}, {'id': 16661, 'synset': 'pipe_smoker.n.01', 'name': 'pipe_smoker'}, {'id': 16662, 'synset': 'pip-squeak.n.01', 'name': 'pip-squeak'}, {'id': 16663, 'synset': 'pisser.n.01', 'name': 'pisser'}, {'id': 16664, 'synset': 'pitcher.n.01', 'name': 'pitcher'}, {'id': 16665, 'synset': 'pitchman.n.01', 'name': 'pitchman'}, {'id': 16666, 'synset': 'placeman.n.01', 'name': 'placeman'}, {'id': 16667, 'synset': 'placer_miner.n.01', 'name': 'placer_miner'}, {'id': 16668, 'synset': 'plagiarist.n.01', 'name': 'plagiarist'}, {'id': 16669, 'synset': 'plainsman.n.01', 'name': 'plainsman'}, {'id': 16670, 'synset': 'planner.n.01', 'name': 'planner'}, {'id': 16671, 'synset': 'planter.n.01', 'name': 'planter'}, {'id': 16672, 'synset': 'plasterer.n.01', 'name': 'plasterer'}, {'id': 16673, 'synset': 'platinum_blond.n.01', 'name': 'platinum_blond'}, {'id': 16674, 
'synset': 'platitudinarian.n.01', 'name': 'platitudinarian'}, {'id': 16675, 'synset': 'playboy.n.01', 'name': 'playboy'}, {'id': 16676, 'synset': 'player.n.01', 'name': 'player'}, {'id': 16677, 'synset': 'playmate.n.01', 'name': 'playmate'}, {'id': 16678, 'synset': 'pleaser.n.01', 'name': 'pleaser'}, {'id': 16679, 'synset': 'pledger.n.01', 'name': 'pledger'}, {'id': 16680, 'synset': 'plenipotentiary.n.01', 'name': 'plenipotentiary'}, {'id': 16681, 'synset': 'plier.n.01', 'name': 'plier'}, {'id': 16682, 'synset': 'plodder.n.03', 'name': 'plodder'}, {'id': 16683, 'synset': 'plodder.n.02', 'name': 'plodder'}, {'id': 16684, 'synset': 'plotter.n.02', 'name': 'plotter'}, {'id': 16685, 'synset': 'plumber.n.01', 'name': 'plumber'}, {'id': 16686, 'synset': 'pluralist.n.02', 'name': 'pluralist'}, {'id': 16687, 'synset': 'pluralist.n.01', 'name': 'pluralist'}, {'id': 16688, 'synset': 'poet.n.01', 'name': 'poet'}, {'id': 16689, 'synset': 'pointsman.n.01', 'name': 'pointsman'}, {'id': 16690, 'synset': 'point_woman.n.01', 'name': 'point_woman'}, {'id': 16691, 'synset': 'policyholder.n.01', 'name': 'policyholder'}, {'id': 16692, 'synset': 'political_prisoner.n.01', 'name': 'political_prisoner'}, {'id': 16693, 'synset': 'political_scientist.n.01', 'name': 'political_scientist'}, {'id': 16694, 'synset': 'politician.n.02', 'name': 'politician'}, {'id': 16695, 'synset': 'politician.n.03', 'name': 'politician'}, {'id': 16696, 'synset': 'pollster.n.01', 'name': 'pollster'}, {'id': 16697, 'synset': 'polluter.n.01', 'name': 'polluter'}, {'id': 16698, 'synset': 'pool_player.n.01', 'name': 'pool_player'}, {'id': 16699, 'synset': 'portraitist.n.01', 'name': 'portraitist'}, {'id': 16700, 'synset': 'poseuse.n.01', 'name': 'poseuse'}, {'id': 16701, 'synset': 'positivist.n.01', 'name': 'positivist'}, {'id': 16702, 'synset': 'postdoc.n.02', 'name': 'postdoc'}, {'id': 16703, 'synset': 'poster_girl.n.01', 'name': 'poster_girl'}, {'id': 16704, 'synset': 'postulator.n.02', 'name': 'postulator'}, {'id': 16705, 'synset': 'private_citizen.n.01', 'name': 'private_citizen'}, {'id': 16706, 'synset': 'problem_solver.n.01', 'name': 'problem_solver'}, {'id': 16707, 'synset': 'pro-lifer.n.01', 'name': 'pro-lifer'}, {'id': 16708, 'synset': 'prosthetist.n.01', 'name': 'prosthetist'}, {'id': 16709, 'synset': 'postulant.n.01', 'name': 'postulant'}, {'id': 16710, 'synset': 'potboy.n.01', 'name': 'potboy'}, {'id': 16711, 'synset': 'poultryman.n.01', 'name': 'poultryman'}, {'id': 16712, 'synset': 'power_user.n.01', 'name': 'power_user'}, {'id': 16713, 'synset': 'power_worker.n.01', 'name': 'power_worker'}, {'id': 16714, 'synset': 'practitioner.n.01', 'name': 'practitioner'}, {'id': 16715, 'synset': 'prayer.n.05', 'name': 'prayer'}, {'id': 16716, 'synset': 'preceptor.n.01', 'name': 'preceptor'}, {'id': 16717, 'synset': 'predecessor.n.01', 'name': 'predecessor'}, {'id': 16718, 'synset': 'preemptor.n.02', 'name': 'preemptor'}, {'id': 16719, 'synset': 'preemptor.n.01', 'name': 'preemptor'}, {'id': 16720, 'synset': 'premature_baby.n.01', 'name': 'premature_baby'}, {'id': 16721, 'synset': 'presbyter.n.01', 'name': 'presbyter'}, {'id': 16722, 'synset': 'presenter.n.02', 'name': 'presenter'}, {'id': 16723, 'synset': 'presentist.n.01', 'name': 'presentist'}, {'id': 16724, 'synset': 'preserver.n.03', 'name': 'preserver'}, {'id': 16725, 'synset': 'president.n.03', 'name': 'president'}, {'id': 16726, 'synset': 'president_of_the_united_states.n.01', 'name': 'President_of_the_United_States'}, {'id': 16727, 'synset': 'president.n.05', 'name': 
'president'}, {'id': 16728, 'synset': 'press_agent.n.01', 'name': 'press_agent'}, {'id': 16729, 'synset': 'press_photographer.n.01', 'name': 'press_photographer'}, {'id': 16730, 'synset': 'priest.n.01', 'name': 'priest'}, {'id': 16731, 'synset': 'prima_ballerina.n.01', 'name': 'prima_ballerina'}, {'id': 16732, 'synset': 'prima_donna.n.02', 'name': 'prima_donna'}, {'id': 16733, 'synset': 'prima_donna.n.01', 'name': 'prima_donna'}, {'id': 16734, 'synset': 'primigravida.n.01', 'name': 'primigravida'}, {'id': 16735, 'synset': 'primordial_dwarf.n.01', 'name': 'primordial_dwarf'}, {'id': 16736, 'synset': 'prince_charming.n.01', 'name': 'prince_charming'}, {'id': 16737, 'synset': 'prince_consort.n.01', 'name': 'prince_consort'}, {'id': 16738, 'synset': 'princeling.n.01', 'name': 'princeling'}, {'id': 16739, 'synset': 'prince_of_wales.n.01', 'name': 'Prince_of_Wales'}, {'id': 16740, 'synset': 'princess.n.01', 'name': 'princess'}, {'id': 16741, 'synset': 'princess_royal.n.01', 'name': 'princess_royal'}, {'id': 16742, 'synset': 'principal.n.06', 'name': 'principal'}, {'id': 16743, 'synset': 'principal.n.02', 'name': 'principal'}, {'id': 16744, 'synset': 'print_seller.n.01', 'name': 'print_seller'}, {'id': 16745, 'synset': 'prior.n.01', 'name': 'prior'}, {'id': 16746, 'synset': 'private.n.01', 'name': 'private'}, {'id': 16747, 'synset': 'probationer.n.01', 'name': 'probationer'}, {'id': 16748, 'synset': 'processor.n.02', 'name': 'processor'}, {'id': 16749, 'synset': 'process-server.n.01', 'name': 'process-server'}, {'id': 16750, 'synset': 'proconsul.n.02', 'name': 'proconsul'}, {'id': 16751, 'synset': 'proconsul.n.01', 'name': 'proconsul'}, {'id': 16752, 'synset': 'proctologist.n.01', 'name': 'proctologist'}, {'id': 16753, 'synset': 'proctor.n.01', 'name': 'proctor'}, {'id': 16754, 'synset': 'procurator.n.02', 'name': 'procurator'}, {'id': 16755, 'synset': 'procurer.n.02', 'name': 'procurer'}, {'id': 16756, 'synset': 'profit_taker.n.01', 'name': 'profit_taker'}, {'id': 16757, 'synset': 'programmer.n.01', 'name': 'programmer'}, {'id': 16758, 'synset': 'promiser.n.01', 'name': 'promiser'}, {'id': 16759, 'synset': 'promoter.n.01', 'name': 'promoter'}, {'id': 16760, 'synset': 'promulgator.n.01', 'name': 'promulgator'}, {'id': 16761, 'synset': 'propagandist.n.01', 'name': 'propagandist'}, {'id': 16762, 'synset': 'propagator.n.02', 'name': 'propagator'}, {'id': 16763, 'synset': 'property_man.n.01', 'name': 'property_man'}, {'id': 16764, 'synset': 'prophetess.n.01', 'name': 'prophetess'}, {'id': 16765, 'synset': 'prophet.n.02', 'name': 'prophet'}, {'id': 16766, 'synset': 'prosecutor.n.01', 'name': 'prosecutor'}, {'id': 16767, 'synset': 'prospector.n.01', 'name': 'prospector'}, {'id': 16768, 'synset': 'protectionist.n.01', 'name': 'protectionist'}, {'id': 16769, 'synset': 'protegee.n.01', 'name': 'protegee'}, {'id': 16770, 'synset': 'protozoologist.n.01', 'name': 'protozoologist'}, {'id': 16771, 'synset': 'provost_marshal.n.01', 'name': 'provost_marshal'}, {'id': 16772, 'synset': 'pruner.n.01', 'name': 'pruner'}, {'id': 16773, 'synset': 'psalmist.n.01', 'name': 'psalmist'}, {'id': 16774, 'synset': 'psephologist.n.01', 'name': 'psephologist'}, {'id': 16775, 'synset': 'psychiatrist.n.01', 'name': 'psychiatrist'}, {'id': 16776, 'synset': 'psychic.n.01', 'name': 'psychic'}, {'id': 16777, 'synset': 'psycholinguist.n.01', 'name': 'psycholinguist'}, {'id': 16778, 'synset': 'psychophysicist.n.01', 'name': 'psychophysicist'}, {'id': 16779, 'synset': 'publican.n.01', 'name': 'publican'}, {'id': 16780, 'synset': 
'pudge.n.01', 'name': 'pudge'}, {'id': 16781, 'synset': 'puerpera.n.01', 'name': 'puerpera'}, {'id': 16782, 'synset': 'punching_bag.n.01', 'name': 'punching_bag'}, {'id': 16783, 'synset': 'punter.n.02', 'name': 'punter'}, {'id': 16784, 'synset': 'punter.n.01', 'name': 'punter'}, {'id': 16785, 'synset': 'puppeteer.n.01', 'name': 'puppeteer'}, {'id': 16786, 'synset': 'puppy.n.02', 'name': 'puppy'}, {'id': 16787, 'synset': 'purchasing_agent.n.01', 'name': 'purchasing_agent'}, {'id': 16788, 'synset': 'puritan.n.02', 'name': 'puritan'}, {'id': 16789, 'synset': 'puritan.n.01', 'name': 'Puritan'}, {'id': 16790, 'synset': 'pursuer.n.02', 'name': 'pursuer'}, {'id': 16791, 'synset': 'pusher.n.03', 'name': 'pusher'}, {'id': 16792, 'synset': 'pusher.n.02', 'name': 'pusher'}, {'id': 16793, 'synset': 'pusher.n.01', 'name': 'pusher'}, {'id': 16794, 'synset': 'putz.n.01', 'name': 'putz'}, {'id': 16795, 'synset': 'pygmy.n.02', 'name': 'Pygmy'}, {'id': 16796, 'synset': 'qadi.n.01', 'name': 'qadi'}, {'id': 16797, 'synset': 'quadriplegic.n.01', 'name': 'quadriplegic'}, {'id': 16798, 'synset': 'quadruplet.n.02', 'name': 'quadruplet'}, {'id': 16799, 'synset': 'quaker.n.02', 'name': 'quaker'}, {'id': 16800, 'synset': 'quarter.n.11', 'name': 'quarter'}, {'id': 16801, 'synset': 'quarterback.n.01', 'name': 'quarterback'}, {'id': 16802, 'synset': 'quartermaster.n.01', 'name': 'quartermaster'}, {'id': 16803, 'synset': 'quartermaster_general.n.01', 'name': 'quartermaster_general'}, {'id': 16804, 'synset': 'quebecois.n.01', 'name': 'Quebecois'}, {'id': 16805, 'synset': 'queen.n.02', 'name': 'queen'}, {'id': 16806, 'synset': 'queen_of_england.n.01', 'name': 'Queen_of_England'}, {'id': 16807, 'synset': 'queen.n.03', 'name': 'queen'}, {'id': 16808, 'synset': 'queen.n.04', 'name': 'queen'}, {'id': 16809, 'synset': 'queen_consort.n.01', 'name': 'queen_consort'}, {'id': 16810, 'synset': 'queen_mother.n.01', 'name': 'queen_mother'}, {'id': 16811, 'synset': "queen's_counsel.n.01", 'name': "Queen's_Counsel"}, {'id': 16812, 'synset': 'question_master.n.01', 'name': 'question_master'}, {'id': 16813, 'synset': 'quick_study.n.01', 'name': 'quick_study'}, {'id': 16814, 'synset': 'quietist.n.01', 'name': 'quietist'}, {'id': 16815, 'synset': 'quitter.n.01', 'name': 'quitter'}, {'id': 16816, 'synset': 'rabbi.n.01', 'name': 'rabbi'}, {'id': 16817, 'synset': 'racist.n.01', 'name': 'racist'}, {'id': 16818, 'synset': 'radiobiologist.n.01', 'name': 'radiobiologist'}, {'id': 16819, 'synset': 'radiologic_technologist.n.01', 'name': 'radiologic_technologist'}, {'id': 16820, 'synset': 'radiologist.n.01', 'name': 'radiologist'}, {'id': 16821, 'synset': 'rainmaker.n.02', 'name': 'rainmaker'}, {'id': 16822, 'synset': 'raiser.n.01', 'name': 'raiser'}, {'id': 16823, 'synset': 'raja.n.01', 'name': 'raja'}, {'id': 16824, 'synset': 'rake.n.01', 'name': 'rake'}, {'id': 16825, 'synset': 'ramrod.n.02', 'name': 'ramrod'}, {'id': 16826, 'synset': 'ranch_hand.n.01', 'name': 'ranch_hand'}, {'id': 16827, 'synset': 'ranker.n.01', 'name': 'ranker'}, {'id': 16828, 'synset': 'ranter.n.01', 'name': 'ranter'}, {'id': 16829, 'synset': 'rape_suspect.n.01', 'name': 'rape_suspect'}, {'id': 16830, 'synset': 'rapper.n.01', 'name': 'rapper'}, {'id': 16831, 'synset': 'rapporteur.n.01', 'name': 'rapporteur'}, {'id': 16832, 'synset': 'rare_bird.n.01', 'name': 'rare_bird'}, {'id': 16833, 'synset': 'ratepayer.n.01', 'name': 'ratepayer'}, {'id': 16834, 'synset': 'raw_recruit.n.01', 'name': 'raw_recruit'}, {'id': 16835, 'synset': 'reader.n.01', 'name': 'reader'}, {'id': 16836, 
'synset': 'reading_teacher.n.01', 'name': 'reading_teacher'}, {'id': 16837, 'synset': 'realist.n.01', 'name': 'realist'}, {'id': 16838, 'synset': 'real_estate_broker.n.01', 'name': 'real_estate_broker'}, {'id': 16839, 'synset': 'rear_admiral.n.01', 'name': 'rear_admiral'}, {'id': 16840, 'synset': 'receiver.n.05', 'name': 'receiver'}, {'id': 16841, 'synset': 'reciter.n.01', 'name': 'reciter'}, {'id': 16842, 'synset': 'recruit.n.02', 'name': 'recruit'}, {'id': 16843, 'synset': 'recruit.n.01', 'name': 'recruit'}, {'id': 16844, 'synset': 'recruiter.n.01', 'name': 'recruiter'}, {'id': 16845, 'synset': 'recruiting-sergeant.n.01', 'name': 'recruiting-sergeant'}, {'id': 16846, 'synset': 'redcap.n.01', 'name': 'redcap'}, {'id': 16847, 'synset': 'redhead.n.01', 'name': 'redhead'}, {'id': 16848, 'synset': 'redneck.n.01', 'name': 'redneck'}, {'id': 16849, 'synset': 'reeler.n.02', 'name': 'reeler'}, {'id': 16850, 'synset': 'reenactor.n.01', 'name': 'reenactor'}, {'id': 16851, 'synset': 'referral.n.01', 'name': 'referral'}, {'id': 16852, 'synset': 'referee.n.01', 'name': 'referee'}, {'id': 16853, 'synset': 'refiner.n.01', 'name': 'refiner'}, {'id': 16854, 'synset': 'reform_jew.n.01', 'name': 'Reform_Jew'}, {'id': 16855, 'synset': 'registered_nurse.n.01', 'name': 'registered_nurse'}, {'id': 16856, 'synset': 'registrar.n.01', 'name': 'registrar'}, {'id': 16857, 'synset': 'regius_professor.n.01', 'name': 'Regius_professor'}, {'id': 16858, 'synset': 'reliever.n.02', 'name': 'reliever'}, {'id': 16859, 'synset': 'anchorite.n.01', 'name': 'anchorite'}, {'id': 16860, 'synset': 'religious_leader.n.01', 'name': 'religious_leader'}, {'id': 16861, 'synset': 'remover.n.02', 'name': 'remover'}, {'id': 16862, 'synset': 'renaissance_man.n.01', 'name': 'Renaissance_man'}, {'id': 16863, 'synset': 'renegade.n.01', 'name': 'renegade'}, {'id': 16864, 'synset': 'rentier.n.01', 'name': 'rentier'}, {'id': 16865, 'synset': 'repairman.n.01', 'name': 'repairman'}, {'id': 16866, 'synset': 'reporter.n.01', 'name': 'reporter'}, {'id': 16867, 'synset': 'newswoman.n.01', 'name': 'newswoman'}, {'id': 16868, 'synset': 'representative.n.01', 'name': 'representative'}, {'id': 16869, 'synset': 'reprobate.n.01', 'name': 'reprobate'}, {'id': 16870, 'synset': 'rescuer.n.02', 'name': 'rescuer'}, {'id': 16871, 'synset': 'reservist.n.01', 'name': 'reservist'}, {'id': 16872, 'synset': 'resident_commissioner.n.01', 'name': 'resident_commissioner'}, {'id': 16873, 'synset': 'respecter.n.01', 'name': 'respecter'}, {'id': 16874, 'synset': 'restaurateur.n.01', 'name': 'restaurateur'}, {'id': 16875, 'synset': 'restrainer.n.02', 'name': 'restrainer'}, {'id': 16876, 'synset': 'retailer.n.01', 'name': 'retailer'}, {'id': 16877, 'synset': 'retiree.n.01', 'name': 'retiree'}, {'id': 16878, 'synset': 'returning_officer.n.01', 'name': 'returning_officer'}, {'id': 16879, 'synset': 'revenant.n.01', 'name': 'revenant'}, {'id': 16880, 'synset': 'revisionist.n.01', 'name': 'revisionist'}, {'id': 16881, 'synset': 'revolutionist.n.01', 'name': 'revolutionist'}, {'id': 16882, 'synset': 'rheumatologist.n.01', 'name': 'rheumatologist'}, {'id': 16883, 'synset': 'rhodesian_man.n.01', 'name': 'Rhodesian_man'}, {'id': 16884, 'synset': 'rhymer.n.01', 'name': 'rhymer'}, {'id': 16885, 'synset': 'rich_person.n.01', 'name': 'rich_person'}, {'id': 16886, 'synset': 'rider.n.03', 'name': 'rider'}, {'id': 16887, 'synset': 'riding_master.n.01', 'name': 'riding_master'}, {'id': 16888, 'synset': 'rifleman.n.02', 'name': 'rifleman'}, {'id': 16889, 'synset': 'right-hander.n.02', 'name': 
'right-hander'}, {'id': 16890, 'synset': 'right-hand_man.n.01', 'name': 'right-hand_man'}, {'id': 16891, 'synset': 'ringer.n.03', 'name': 'ringer'}, {'id': 16892, 'synset': 'ringleader.n.01', 'name': 'ringleader'}, {'id': 16893, 'synset': 'roadman.n.02', 'name': 'roadman'}, {'id': 16894, 'synset': 'roarer.n.01', 'name': 'roarer'}, {'id': 16895, 'synset': 'rocket_engineer.n.01', 'name': 'rocket_engineer'}, {'id': 16896, 'synset': 'rocket_scientist.n.01', 'name': 'rocket_scientist'}, {'id': 16897, 'synset': 'rock_star.n.01', 'name': 'rock_star'}, {'id': 16898, 'synset': 'romanov.n.01', 'name': 'Romanov'}, {'id': 16899, 'synset': 'romanticist.n.02', 'name': 'romanticist'}, {'id': 16900, 'synset': 'ropemaker.n.01', 'name': 'ropemaker'}, {'id': 16901, 'synset': 'roper.n.02', 'name': 'roper'}, {'id': 16902, 'synset': 'roper.n.01', 'name': 'roper'}, {'id': 16903, 'synset': 'ropewalker.n.01', 'name': 'ropewalker'}, {'id': 16904, 'synset': 'rosebud.n.02', 'name': 'rosebud'}, {'id': 16905, 'synset': 'rosicrucian.n.02', 'name': 'Rosicrucian'}, {'id': 16906, 'synset': 'mountie.n.01', 'name': 'Mountie'}, {'id': 16907, 'synset': 'rough_rider.n.01', 'name': 'Rough_Rider'}, {'id': 16908, 'synset': 'roundhead.n.01', 'name': 'roundhead'}, {'id': 16909, 'synset': 'civil_authority.n.01', 'name': 'civil_authority'}, {'id': 16910, 'synset': 'runner.n.03', 'name': 'runner'}, {'id': 16911, 'synset': 'runner.n.02', 'name': 'runner'}, {'id': 16912, 'synset': 'runner.n.06', 'name': 'runner'}, {'id': 16913, 'synset': 'running_back.n.01', 'name': 'running_back'}, {'id': 16914, 'synset': 'rusher.n.02', 'name': 'rusher'}, {'id': 16915, 'synset': 'rustic.n.01', 'name': 'rustic'}, {'id': 16916, 'synset': 'saboteur.n.01', 'name': 'saboteur'}, {'id': 16917, 'synset': 'sadist.n.01', 'name': 'sadist'}, {'id': 16918, 'synset': 'sailing_master.n.01', 'name': 'sailing_master'}, {'id': 16919, 'synset': 'sailor.n.01', 'name': 'sailor'}, {'id': 16920, 'synset': 'salesgirl.n.01', 'name': 'salesgirl'}, {'id': 16921, 'synset': 'salesman.n.01', 'name': 'salesman'}, {'id': 16922, 'synset': 'salesperson.n.01', 'name': 'salesperson'}, {'id': 16923, 'synset': 'salvager.n.01', 'name': 'salvager'}, {'id': 16924, 'synset': 'sandwichman.n.01', 'name': 'sandwichman'}, {'id': 16925, 'synset': 'sangoma.n.01', 'name': 'sangoma'}, {'id': 16926, 'synset': 'sannup.n.01', 'name': 'sannup'}, {'id': 16927, 'synset': 'sapper.n.02', 'name': 'sapper'}, {'id': 16928, 'synset': 'sassenach.n.01', 'name': 'Sassenach'}, {'id': 16929, 'synset': 'satrap.n.01', 'name': 'satrap'}, {'id': 16930, 'synset': 'saunterer.n.01', 'name': 'saunterer'}, {'id': 16931, 'synset': 'savoyard.n.01', 'name': 'Savoyard'}, {'id': 16932, 'synset': 'sawyer.n.01', 'name': 'sawyer'}, {'id': 16933, 'synset': 'scalper.n.01', 'name': 'scalper'}, {'id': 16934, 'synset': 'scandalmonger.n.01', 'name': 'scandalmonger'}, {'id': 16935, 'synset': 'scapegrace.n.01', 'name': 'scapegrace'}, {'id': 16936, 'synset': 'scene_painter.n.02', 'name': 'scene_painter'}, {'id': 16937, 'synset': 'schemer.n.01', 'name': 'schemer'}, {'id': 16938, 'synset': 'schizophrenic.n.01', 'name': 'schizophrenic'}, {'id': 16939, 'synset': 'schlemiel.n.01', 'name': 'schlemiel'}, {'id': 16940, 'synset': 'schlockmeister.n.01', 'name': 'schlockmeister'}, {'id': 16941, 'synset': 'scholar.n.01', 'name': 'scholar'}, {'id': 16942, 'synset': 'scholiast.n.01', 'name': 'scholiast'}, {'id': 16943, 'synset': 'schoolchild.n.01', 'name': 'schoolchild'}, {'id': 16944, 'synset': 'schoolfriend.n.01', 'name': 'schoolfriend'}, {'id': 16945, 
'synset': 'schoolman.n.01', 'name': 'Schoolman'}, {'id': 16946, 'synset': 'schoolmaster.n.02', 'name': 'schoolmaster'}, {'id': 16947, 'synset': 'schoolmate.n.01', 'name': 'schoolmate'}, {'id': 16948, 'synset': 'scientist.n.01', 'name': 'scientist'}, {'id': 16949, 'synset': 'scion.n.01', 'name': 'scion'}, {'id': 16950, 'synset': 'scoffer.n.02', 'name': 'scoffer'}, {'id': 16951, 'synset': 'scofflaw.n.01', 'name': 'scofflaw'}, {'id': 16952, 'synset': 'scorekeeper.n.01', 'name': 'scorekeeper'}, {'id': 16953, 'synset': 'scorer.n.02', 'name': 'scorer'}, {'id': 16954, 'synset': 'scourer.n.02', 'name': 'scourer'}, {'id': 16955, 'synset': 'scout.n.03', 'name': 'scout'}, {'id': 16956, 'synset': 'scoutmaster.n.01', 'name': 'scoutmaster'}, {'id': 16957, 'synset': 'scrambler.n.01', 'name': 'scrambler'}, {'id': 16958, 'synset': 'scratcher.n.02', 'name': 'scratcher'}, {'id': 16959, 'synset': 'screen_actor.n.01', 'name': 'screen_actor'}, {'id': 16960, 'synset': 'scrutineer.n.01', 'name': 'scrutineer'}, {'id': 16961, 'synset': 'scuba_diver.n.01', 'name': 'scuba_diver'}, {'id': 16962, 'synset': 'sculptor.n.01', 'name': 'sculptor'}, {'id': 16963, 'synset': 'sea_scout.n.01', 'name': 'Sea_Scout'}, {'id': 16964, 'synset': 'seasonal_worker.n.01', 'name': 'seasonal_worker'}, {'id': 16965, 'synset': 'seasoner.n.01', 'name': 'seasoner'}, {'id': 16966, 'synset': 'second_baseman.n.01', 'name': 'second_baseman'}, {'id': 16967, 'synset': 'second_cousin.n.01', 'name': 'second_cousin'}, {'id': 16968, 'synset': 'seconder.n.01', 'name': 'seconder'}, {'id': 16969, 'synset': 'second_fiddle.n.01', 'name': 'second_fiddle'}, {'id': 16970, 'synset': 'second-in-command.n.01', 'name': 'second-in-command'}, {'id': 16971, 'synset': 'second_lieutenant.n.01', 'name': 'second_lieutenant'}, {'id': 16972, 'synset': 'second-rater.n.01', 'name': 'second-rater'}, {'id': 16973, 'synset': 'secretary.n.01', 'name': 'secretary'}, {'id': 16974, 'synset': 'secretary_of_agriculture.n.01', 'name': 'Secretary_of_Agriculture'}, {'id': 16975, 'synset': 'secretary_of_health_and_human_services.n.01', 'name': 'Secretary_of_Health_and_Human_Services'}, {'id': 16976, 'synset': 'secretary_of_state.n.01', 'name': 'Secretary_of_State'}, {'id': 16977, 'synset': 'secretary_of_the_interior.n.02', 'name': 'Secretary_of_the_Interior'}, {'id': 16978, 'synset': 'sectarian.n.01', 'name': 'sectarian'}, {'id': 16979, 'synset': 'section_hand.n.01', 'name': 'section_hand'}, {'id': 16980, 'synset': 'secularist.n.01', 'name': 'secularist'}, {'id': 16981, 'synset': 'security_consultant.n.01', 'name': 'security_consultant'}, {'id': 16982, 'synset': 'seeded_player.n.01', 'name': 'seeded_player'}, {'id': 16983, 'synset': 'seeder.n.01', 'name': 'seeder'}, {'id': 16984, 'synset': 'seeker.n.01', 'name': 'seeker'}, {'id': 16985, 'synset': 'segregate.n.01', 'name': 'segregate'}, {'id': 16986, 'synset': 'segregator.n.01', 'name': 'segregator'}, {'id': 16987, 'synset': 'selectman.n.01', 'name': 'selectman'}, {'id': 16988, 'synset': 'selectwoman.n.01', 'name': 'selectwoman'}, {'id': 16989, 'synset': 'selfish_person.n.01', 'name': 'selfish_person'}, {'id': 16990, 'synset': 'self-starter.n.01', 'name': 'self-starter'}, {'id': 16991, 'synset': 'seller.n.01', 'name': 'seller'}, {'id': 16992, 'synset': 'selling_agent.n.01', 'name': 'selling_agent'}, {'id': 16993, 'synset': 'semanticist.n.01', 'name': 'semanticist'}, {'id': 16994, 'synset': 'semifinalist.n.01', 'name': 'semifinalist'}, {'id': 16995, 'synset': 'seminarian.n.01', 'name': 'seminarian'}, {'id': 16996, 'synset': 'senator.n.01', 
'name': 'senator'}, {'id': 16997, 'synset': 'sendee.n.01', 'name': 'sendee'}, {'id': 16998, 'synset': 'senior.n.01', 'name': 'senior'}, {'id': 16999, 'synset': 'senior_vice_president.n.01', 'name': 'senior_vice_president'}, {'id': 17000, 'synset': 'separatist.n.01', 'name': 'separatist'}, {'id': 17001, 'synset': 'septuagenarian.n.01', 'name': 'septuagenarian'}, {'id': 17002, 'synset': 'serf.n.01', 'name': 'serf'}, {'id': 17003, 'synset': 'spree_killer.n.01', 'name': 'spree_killer'}, {'id': 17004, 'synset': 'serjeant-at-law.n.01', 'name': 'serjeant-at-law'}, {'id': 17005, 'synset': 'server.n.02', 'name': 'server'}, {'id': 17006, 'synset': 'serviceman.n.01', 'name': 'serviceman'}, {'id': 17007, 'synset': 'settler.n.01', 'name': 'settler'}, {'id': 17008, 'synset': 'settler.n.03', 'name': 'settler'}, {'id': 17009, 'synset': 'sex_symbol.n.01', 'name': 'sex_symbol'}, {'id': 17010, 'synset': 'sexton.n.02', 'name': 'sexton'}, {'id': 17011, 'synset': 'shaheed.n.01', 'name': 'shaheed'}, {'id': 17012, 'synset': 'shakespearian.n.01', 'name': 'Shakespearian'}, {'id': 17013, 'synset': 'shanghaier.n.01', 'name': 'shanghaier'}, {'id': 17014, 'synset': 'sharecropper.n.01', 'name': 'sharecropper'}, {'id': 17015, 'synset': 'shaver.n.01', 'name': 'shaver'}, {'id': 17016, 'synset': 'shavian.n.01', 'name': 'Shavian'}, {'id': 17017, 'synset': 'sheep.n.02', 'name': 'sheep'}, {'id': 17018, 'synset': 'sheik.n.01', 'name': 'sheik'}, {'id': 17019, 'synset': 'shelver.n.01', 'name': 'shelver'}, {'id': 17020, 'synset': 'shepherd.n.01', 'name': 'shepherd'}, {'id': 17021, 'synset': 'ship-breaker.n.01', 'name': 'ship-breaker'}, {'id': 17022, 'synset': 'shipmate.n.01', 'name': 'shipmate'}, {'id': 17023, 'synset': 'shipowner.n.01', 'name': 'shipowner'}, {'id': 17024, 'synset': 'shipping_agent.n.01', 'name': 'shipping_agent'}, {'id': 17025, 'synset': 'shirtmaker.n.01', 'name': 'shirtmaker'}, {'id': 17026, 'synset': 'shogun.n.01', 'name': 'shogun'}, {'id': 17027, 'synset': 'shopaholic.n.01', 'name': 'shopaholic'}, {'id': 17028, 'synset': 'shop_girl.n.01', 'name': 'shop_girl'}, {'id': 17029, 'synset': 'shop_steward.n.01', 'name': 'shop_steward'}, {'id': 17030, 'synset': 'shot_putter.n.01', 'name': 'shot_putter'}, {'id': 17031, 'synset': 'shrew.n.01', 'name': 'shrew'}, {'id': 17032, 'synset': 'shuffler.n.01', 'name': 'shuffler'}, {'id': 17033, 'synset': 'shyster.n.01', 'name': 'shyster'}, {'id': 17034, 'synset': 'sibling.n.01', 'name': 'sibling'}, {'id': 17035, 'synset': 'sick_person.n.01', 'name': 'sick_person'}, {'id': 17036, 'synset': 'sightreader.n.01', 'name': 'sightreader'}, {'id': 17037, 'synset': 'signaler.n.01', 'name': 'signaler'}, {'id': 17038, 'synset': 'signer.n.01', 'name': 'signer'}, {'id': 17039, 'synset': 'signor.n.01', 'name': 'signor'}, {'id': 17040, 'synset': 'signora.n.01', 'name': 'signora'}, {'id': 17041, 'synset': 'signore.n.01', 'name': 'signore'}, {'id': 17042, 'synset': 'signorina.n.01', 'name': 'signorina'}, {'id': 17043, 'synset': 'silent_partner.n.01', 'name': 'silent_partner'}, {'id': 17044, 'synset': 'addle-head.n.01', 'name': 'addle-head'}, {'id': 17045, 'synset': 'simperer.n.01', 'name': 'simperer'}, {'id': 17046, 'synset': 'singer.n.01', 'name': 'singer'}, {'id': 17047, 'synset': 'sinologist.n.01', 'name': 'Sinologist'}, {'id': 17048, 'synset': 'sipper.n.01', 'name': 'sipper'}, {'id': 17049, 'synset': 'sirrah.n.01', 'name': 'sirrah'}, {'id': 17050, 'synset': 'sister.n.02', 'name': 'Sister'}, {'id': 17051, 'synset': 'sister.n.01', 'name': 'sister'}, {'id': 17052, 'synset': 'waverer.n.01', 
'name': 'waverer'}, {'id': 17053, 'synset': 'sitar_player.n.01', 'name': 'sitar_player'}, {'id': 17054, 'synset': 'sixth-former.n.01', 'name': 'sixth-former'}, {'id': 17055, 'synset': 'skateboarder.n.01', 'name': 'skateboarder'}, {'id': 17056, 'synset': 'skeptic.n.01', 'name': 'skeptic'}, {'id': 17057, 'synset': 'sketcher.n.01', 'name': 'sketcher'}, {'id': 17058, 'synset': 'skidder.n.02', 'name': 'skidder'}, {'id': 17059, 'synset': 'skier.n.01', 'name': 'skier'}, {'id': 17060, 'synset': 'skinny-dipper.n.01', 'name': 'skinny-dipper'}, {'id': 17061, 'synset': 'skin-diver.n.01', 'name': 'skin-diver'}, {'id': 17062, 'synset': 'skinhead.n.01', 'name': 'skinhead'}, {'id': 17063, 'synset': 'slasher.n.01', 'name': 'slasher'}, {'id': 17064, 'synset': 'slattern.n.02', 'name': 'slattern'}, {'id': 17065, 'synset': 'sleeper.n.01', 'name': 'sleeper'}, {'id': 17066, 'synset': 'sleeper.n.02', 'name': 'sleeper'}, {'id': 17067, 'synset': 'sleeping_beauty.n.02', 'name': 'sleeping_beauty'}, {'id': 17068, 'synset': 'sleuth.n.01', 'name': 'sleuth'}, {'id': 17069, 'synset': 'slob.n.01', 'name': 'slob'}, {'id': 17070, 'synset': 'sloganeer.n.01', 'name': 'sloganeer'}, {'id': 17071, 'synset': 'slopseller.n.01', 'name': 'slopseller'}, {'id': 17072, 'synset': 'smasher.n.02', 'name': 'smasher'}, {'id': 17073, 'synset': 'smirker.n.01', 'name': 'smirker'}, {'id': 17074, 'synset': 'smith.n.10', 'name': 'smith'}, {'id': 17075, 'synset': 'smoothie.n.01', 'name': 'smoothie'}, {'id': 17076, 'synset': 'smuggler.n.01', 'name': 'smuggler'}, {'id': 17077, 'synset': 'sneezer.n.01', 'name': 'sneezer'}, {'id': 17078, 'synset': 'snob.n.01', 'name': 'snob'}, {'id': 17079, 'synset': 'snoop.n.01', 'name': 'snoop'}, {'id': 17080, 'synset': 'snorer.n.01', 'name': 'snorer'}, {'id': 17081, 'synset': 'sob_sister.n.01', 'name': 'sob_sister'}, {'id': 17082, 'synset': 'soccer_player.n.01', 'name': 'soccer_player'}, {'id': 17083, 'synset': 'social_anthropologist.n.01', 'name': 'social_anthropologist'}, {'id': 17084, 'synset': 'social_climber.n.01', 'name': 'social_climber'}, {'id': 17085, 'synset': 'socialist.n.01', 'name': 'socialist'}, {'id': 17086, 'synset': 'socializer.n.01', 'name': 'socializer'}, {'id': 17087, 'synset': 'social_scientist.n.01', 'name': 'social_scientist'}, {'id': 17088, 'synset': 'social_secretary.n.01', 'name': 'social_secretary'}, {'id': 17089, 'synset': 'socinian.n.01', 'name': 'Socinian'}, {'id': 17090, 'synset': 'sociolinguist.n.01', 'name': 'sociolinguist'}, {'id': 17091, 'synset': 'sociologist.n.01', 'name': 'sociologist'}, {'id': 17092, 'synset': 'soda_jerk.n.01', 'name': 'soda_jerk'}, {'id': 17093, 'synset': 'sodalist.n.01', 'name': 'sodalist'}, {'id': 17094, 'synset': 'sodomite.n.01', 'name': 'sodomite'}, {'id': 17095, 'synset': 'soldier.n.01', 'name': 'soldier'}, {'id': 17096, 'synset': 'son.n.01', 'name': 'son'}, {'id': 17097, 'synset': 'songster.n.02', 'name': 'songster'}, {'id': 17098, 'synset': 'songstress.n.01', 'name': 'songstress'}, {'id': 17099, 'synset': 'songwriter.n.01', 'name': 'songwriter'}, {'id': 17100, 'synset': 'sorcerer.n.01', 'name': 'sorcerer'}, {'id': 17101, 'synset': 'sorehead.n.01', 'name': 'sorehead'}, {'id': 17102, 'synset': 'soul_mate.n.01', 'name': 'soul_mate'}, {'id': 17103, 'synset': 'southern_baptist.n.01', 'name': 'Southern_Baptist'}, {'id': 17104, 'synset': 'sovereign.n.01', 'name': 'sovereign'}, {'id': 17105, 'synset': 'spacewalker.n.01', 'name': 'spacewalker'}, {'id': 17106, 'synset': 'spanish_american.n.01', 'name': 'Spanish_American'}, {'id': 17107, 'synset': 
'sparring_partner.n.01', 'name': 'sparring_partner'}, {'id': 17108, 'synset': 'spastic.n.01', 'name': 'spastic'}, {'id': 17109, 'synset': 'speaker.n.01', 'name': 'speaker'}, {'id': 17110, 'synset': 'native_speaker.n.01', 'name': 'native_speaker'}, {'id': 17111, 'synset': 'speaker.n.03', 'name': 'Speaker'}, {'id': 17112, 'synset': 'speechwriter.n.01', 'name': 'speechwriter'}, {'id': 17113, 'synset': 'specialist.n.02', 'name': 'specialist'}, {'id': 17114, 'synset': 'specifier.n.01', 'name': 'specifier'}, {'id': 17115, 'synset': 'spectator.n.01', 'name': 'spectator'}, {'id': 17116, 'synset': 'speech_therapist.n.01', 'name': 'speech_therapist'}, {'id': 17117, 'synset': 'speedskater.n.01', 'name': 'speedskater'}, {'id': 17118, 'synset': 'spellbinder.n.01', 'name': 'spellbinder'}, {'id': 17119, 'synset': 'sphinx.n.01', 'name': 'sphinx'}, {'id': 17120, 'synset': 'spinster.n.01', 'name': 'spinster'}, {'id': 17121, 'synset': 'split_end.n.01', 'name': 'split_end'}, {'id': 17122, 'synset': 'sport.n.05', 'name': 'sport'}, {'id': 17123, 'synset': 'sport.n.03', 'name': 'sport'}, {'id': 17124, 'synset': 'sporting_man.n.02', 'name': 'sporting_man'}, {'id': 17125, 'synset': 'sports_announcer.n.01', 'name': 'sports_announcer'}, {'id': 17126, 'synset': 'sports_editor.n.01', 'name': 'sports_editor'}, {'id': 17127, 'synset': 'sprog.n.02', 'name': 'sprog'}, {'id': 17128, 'synset': 'square_dancer.n.01', 'name': 'square_dancer'}, {'id': 17129, 'synset': 'square_shooter.n.01', 'name': 'square_shooter'}, {'id': 17130, 'synset': 'squatter.n.02', 'name': 'squatter'}, {'id': 17131, 'synset': 'squire.n.02', 'name': 'squire'}, {'id': 17132, 'synset': 'squire.n.01', 'name': 'squire'}, {'id': 17133, 'synset': 'staff_member.n.01', 'name': 'staff_member'}, {'id': 17134, 'synset': 'staff_sergeant.n.01', 'name': 'staff_sergeant'}, {'id': 17135, 'synset': 'stage_director.n.01', 'name': 'stage_director'}, {'id': 17136, 'synset': 'stainer.n.01', 'name': 'stainer'}, {'id': 17137, 'synset': 'stakeholder.n.01', 'name': 'stakeholder'}, {'id': 17138, 'synset': 'stalker.n.02', 'name': 'stalker'}, {'id': 17139, 'synset': 'stalking-horse.n.01', 'name': 'stalking-horse'}, {'id': 17140, 'synset': 'stammerer.n.01', 'name': 'stammerer'}, {'id': 17141, 'synset': 'stamper.n.02', 'name': 'stamper'}, {'id': 17142, 'synset': 'standee.n.01', 'name': 'standee'}, {'id': 17143, 'synset': 'stand-in.n.01', 'name': 'stand-in'}, {'id': 17144, 'synset': 'star.n.04', 'name': 'star'}, {'id': 17145, 'synset': 'starlet.n.01', 'name': 'starlet'}, {'id': 17146, 'synset': 'starter.n.03', 'name': 'starter'}, {'id': 17147, 'synset': 'statesman.n.01', 'name': 'statesman'}, {'id': 17148, 'synset': 'state_treasurer.n.01', 'name': 'state_treasurer'}, {'id': 17149, 'synset': 'stationer.n.01', 'name': 'stationer'}, {'id': 17150, 'synset': 'stenographer.n.01', 'name': 'stenographer'}, {'id': 17151, 'synset': 'stentor.n.01', 'name': 'stentor'}, {'id': 17152, 'synset': 'stepbrother.n.01', 'name': 'stepbrother'}, {'id': 17153, 'synset': 'stepmother.n.01', 'name': 'stepmother'}, {'id': 17154, 'synset': 'stepparent.n.01', 'name': 'stepparent'}, {'id': 17155, 'synset': 'stevedore.n.01', 'name': 'stevedore'}, {'id': 17156, 'synset': 'steward.n.01', 'name': 'steward'}, {'id': 17157, 'synset': 'steward.n.03', 'name': 'steward'}, {'id': 17158, 'synset': 'steward.n.02', 'name': 'steward'}, {'id': 17159, 'synset': 'stickler.n.01', 'name': 'stickler'}, {'id': 17160, 'synset': 'stiff.n.01', 'name': 'stiff'}, {'id': 17161, 'synset': 'stifler.n.01', 'name': 'stifler'}, {'id': 17162, 
'synset': 'stipendiary.n.01', 'name': 'stipendiary'}, {'id': 17163, 'synset': 'stitcher.n.01', 'name': 'stitcher'}, {'id': 17164, 'synset': 'stockjobber.n.01', 'name': 'stockjobber'}, {'id': 17165, 'synset': 'stock_trader.n.01', 'name': 'stock_trader'}, {'id': 17166, 'synset': 'stockist.n.01', 'name': 'stockist'}, {'id': 17167, 'synset': 'stoker.n.02', 'name': 'stoker'}, {'id': 17168, 'synset': 'stooper.n.02', 'name': 'stooper'}, {'id': 17169, 'synset': 'store_detective.n.01', 'name': 'store_detective'}, {'id': 17170, 'synset': 'strafer.n.01', 'name': 'strafer'}, {'id': 17171, 'synset': 'straight_man.n.01', 'name': 'straight_man'}, {'id': 17172, 'synset': 'stranger.n.01', 'name': 'stranger'}, {'id': 17173, 'synset': 'stranger.n.02', 'name': 'stranger'}, {'id': 17174, 'synset': 'strategist.n.01', 'name': 'strategist'}, {'id': 17175, 'synset': 'straw_boss.n.01', 'name': 'straw_boss'}, {'id': 17176, 'synset': 'streetwalker.n.01', 'name': 'streetwalker'}, {'id': 17177, 'synset': 'stretcher-bearer.n.01', 'name': 'stretcher-bearer'}, {'id': 17178, 'synset': 'struggler.n.01', 'name': 'struggler'}, {'id': 17179, 'synset': 'stud.n.01', 'name': 'stud'}, {'id': 17180, 'synset': 'student.n.01', 'name': 'student'}, {'id': 17181, 'synset': 'stumblebum.n.01', 'name': 'stumblebum'}, {'id': 17182, 'synset': 'stylist.n.01', 'name': 'stylist'}, {'id': 17183, 'synset': 'subaltern.n.01', 'name': 'subaltern'}, {'id': 17184, 'synset': 'subcontractor.n.01', 'name': 'subcontractor'}, {'id': 17185, 'synset': 'subduer.n.01', 'name': 'subduer'}, {'id': 17186, 'synset': 'subject.n.06', 'name': 'subject'}, {'id': 17187, 'synset': 'subordinate.n.01', 'name': 'subordinate'}, {'id': 17188, 'synset': 'substitute.n.02', 'name': 'substitute'}, {'id': 17189, 'synset': 'successor.n.03', 'name': 'successor'}, {'id': 17190, 'synset': 'successor.n.01', 'name': 'successor'}, {'id': 17191, 'synset': 'succorer.n.01', 'name': 'succorer'}, {'id': 17192, 'synset': 'sufi.n.01', 'name': 'Sufi'}, {'id': 17193, 'synset': 'suffragan.n.01', 'name': 'suffragan'}, {'id': 17194, 'synset': 'suffragette.n.01', 'name': 'suffragette'}, {'id': 17195, 'synset': 'sugar_daddy.n.01', 'name': 'sugar_daddy'}, {'id': 17196, 'synset': 'suicide_bomber.n.01', 'name': 'suicide_bomber'}, {'id': 17197, 'synset': 'suitor.n.01', 'name': 'suitor'}, {'id': 17198, 'synset': 'sumo_wrestler.n.01', 'name': 'sumo_wrestler'}, {'id': 17199, 'synset': 'sunbather.n.01', 'name': 'sunbather'}, {'id': 17200, 'synset': 'sundowner.n.01', 'name': 'sundowner'}, {'id': 17201, 'synset': 'super_heavyweight.n.01', 'name': 'super_heavyweight'}, {'id': 17202, 'synset': 'superior.n.01', 'name': 'superior'}, {'id': 17203, 'synset': 'supermom.n.01', 'name': 'supermom'}, {'id': 17204, 'synset': 'supernumerary.n.02', 'name': 'supernumerary'}, {'id': 17205, 'synset': 'supremo.n.01', 'name': 'supremo'}, {'id': 17206, 'synset': 'surgeon.n.01', 'name': 'surgeon'}, {'id': 17207, 'synset': 'surgeon_general.n.02', 'name': 'Surgeon_General'}, {'id': 17208, 'synset': 'surgeon_general.n.01', 'name': 'Surgeon_General'}, {'id': 17209, 'synset': 'surpriser.n.01', 'name': 'surpriser'}, {'id': 17210, 'synset': 'surveyor.n.01', 'name': 'surveyor'}, {'id': 17211, 'synset': 'surveyor.n.02', 'name': 'surveyor'}, {'id': 17212, 'synset': 'survivor.n.01', 'name': 'survivor'}, {'id': 17213, 'synset': 'sutler.n.01', 'name': 'sutler'}, {'id': 17214, 'synset': 'sweeper.n.01', 'name': 'sweeper'}, {'id': 17215, 'synset': 'sweetheart.n.01', 'name': 'sweetheart'}, {'id': 17216, 'synset': 'swinger.n.02', 'name': 
'swinger'}, {'id': 17217, 'synset': 'switcher.n.01', 'name': 'switcher'}, {'id': 17218, 'synset': 'swot.n.01', 'name': 'swot'}, {'id': 17219, 'synset': 'sycophant.n.01', 'name': 'sycophant'}, {'id': 17220, 'synset': 'sylph.n.01', 'name': 'sylph'}, {'id': 17221, 'synset': 'sympathizer.n.02', 'name': 'sympathizer'}, {'id': 17222, 'synset': 'symphonist.n.01', 'name': 'symphonist'}, {'id': 17223, 'synset': 'syncopator.n.01', 'name': 'syncopator'}, {'id': 17224, 'synset': 'syndic.n.01', 'name': 'syndic'}, {'id': 17225, 'synset': 'tactician.n.01', 'name': 'tactician'}, {'id': 17226, 'synset': 'tagger.n.02', 'name': 'tagger'}, {'id': 17227, 'synset': 'tailback.n.01', 'name': 'tailback'}, {'id': 17228, 'synset': 'tallyman.n.02', 'name': 'tallyman'}, {'id': 17229, 'synset': 'tallyman.n.01', 'name': 'tallyman'}, {'id': 17230, 'synset': 'tanker.n.02', 'name': 'tanker'}, {'id': 17231, 'synset': 'tapper.n.04', 'name': 'tapper'}, {'id': 17232, 'synset': 'tartuffe.n.01', 'name': 'Tartuffe'}, {'id': 17233, 'synset': 'tarzan.n.01', 'name': 'Tarzan'}, {'id': 17234, 'synset': 'taster.n.01', 'name': 'taster'}, {'id': 17235, 'synset': 'tax_assessor.n.01', 'name': 'tax_assessor'}, {'id': 17236, 'synset': 'taxer.n.01', 'name': 'taxer'}, {'id': 17237, 'synset': 'taxi_dancer.n.01', 'name': 'taxi_dancer'}, {'id': 17238, 'synset': 'taxonomist.n.01', 'name': 'taxonomist'}, {'id': 17239, 'synset': 'teacher.n.01', 'name': 'teacher'}, {'id': 17240, 'synset': 'teaching_fellow.n.01', 'name': 'teaching_fellow'}, {'id': 17241, 'synset': 'tearaway.n.01', 'name': 'tearaway'}, {'id': 17242, 'synset': 'technical_sergeant.n.01', 'name': 'technical_sergeant'}, {'id': 17243, 'synset': 'technician.n.02', 'name': 'technician'}, {'id': 17244, 'synset': 'ted.n.01', 'name': 'Ted'}, {'id': 17245, 'synset': 'teetotaler.n.01', 'name': 'teetotaler'}, {'id': 17246, 'synset': 'television_reporter.n.01', 'name': 'television_reporter'}, {'id': 17247, 'synset': 'temporizer.n.01', 'name': 'temporizer'}, {'id': 17248, 'synset': 'tempter.n.01', 'name': 'tempter'}, {'id': 17249, 'synset': 'term_infant.n.01', 'name': 'term_infant'}, {'id': 17250, 'synset': 'toiler.n.01', 'name': 'toiler'}, {'id': 17251, 'synset': 'tenant.n.01', 'name': 'tenant'}, {'id': 17252, 'synset': 'tenant.n.02', 'name': 'tenant'}, {'id': 17253, 'synset': 'tenderfoot.n.01', 'name': 'tenderfoot'}, {'id': 17254, 'synset': 'tennis_player.n.01', 'name': 'tennis_player'}, {'id': 17255, 'synset': 'tennis_pro.n.01', 'name': 'tennis_pro'}, {'id': 17256, 'synset': 'tenor_saxophonist.n.01', 'name': 'tenor_saxophonist'}, {'id': 17257, 'synset': 'termer.n.01', 'name': 'termer'}, {'id': 17258, 'synset': 'terror.n.02', 'name': 'terror'}, {'id': 17259, 'synset': 'tertigravida.n.01', 'name': 'tertigravida'}, {'id': 17260, 'synset': 'testator.n.01', 'name': 'testator'}, {'id': 17261, 'synset': 'testatrix.n.01', 'name': 'testatrix'}, {'id': 17262, 'synset': 'testee.n.01', 'name': 'testee'}, {'id': 17263, 'synset': 'test-tube_baby.n.01', 'name': 'test-tube_baby'}, {'id': 17264, 'synset': 'texas_ranger.n.01', 'name': 'Texas_Ranger'}, {'id': 17265, 'synset': 'thane.n.02', 'name': 'thane'}, {'id': 17266, 'synset': 'theatrical_producer.n.01', 'name': 'theatrical_producer'}, {'id': 17267, 'synset': 'theologian.n.01', 'name': 'theologian'}, {'id': 17268, 'synset': 'theorist.n.01', 'name': 'theorist'}, {'id': 17269, 'synset': 'theosophist.n.01', 'name': 'theosophist'}, {'id': 17270, 'synset': 'therapist.n.01', 'name': 'therapist'}, {'id': 17271, 'synset': 'thessalonian.n.01', 'name': 'Thessalonian'}, 
{'id': 17272, 'synset': 'thinker.n.01', 'name': 'thinker'}, {'id': 17273, 'synset': 'thinker.n.02', 'name': 'thinker'}, {'id': 17274, 'synset': 'thrower.n.02', 'name': 'thrower'}, {'id': 17275, 'synset': 'thurifer.n.01', 'name': 'thurifer'}, {'id': 17276, 'synset': 'ticket_collector.n.01', 'name': 'ticket_collector'}, {'id': 17277, 'synset': 'tight_end.n.01', 'name': 'tight_end'}, {'id': 17278, 'synset': 'tiler.n.01', 'name': 'tiler'}, {'id': 17279, 'synset': 'timekeeper.n.01', 'name': 'timekeeper'}, {'id': 17280, 'synset': 'timorese.n.01', 'name': 'Timorese'}, {'id': 17281, 'synset': 'tinkerer.n.01', 'name': 'tinkerer'}, {'id': 17282, 'synset': 'tinsmith.n.01', 'name': 'tinsmith'}, {'id': 17283, 'synset': 'tinter.n.01', 'name': 'tinter'}, {'id': 17284, 'synset': 'tippler.n.01', 'name': 'tippler'}, {'id': 17285, 'synset': 'tipster.n.01', 'name': 'tipster'}, {'id': 17286, 'synset': 't-man.n.01', 'name': 'T-man'}, {'id': 17287, 'synset': 'toastmaster.n.01', 'name': 'toastmaster'}, {'id': 17288, 'synset': 'toast_mistress.n.01', 'name': 'toast_mistress'}, {'id': 17289, 'synset': 'tobogganist.n.01', 'name': 'tobogganist'}, {'id': 17290, 'synset': 'tomboy.n.01', 'name': 'tomboy'}, {'id': 17291, 'synset': 'toolmaker.n.01', 'name': 'toolmaker'}, {'id': 17292, 'synset': 'torchbearer.n.01', 'name': 'torchbearer'}, {'id': 17293, 'synset': 'tory.n.01', 'name': 'Tory'}, {'id': 17294, 'synset': 'tory.n.02', 'name': 'Tory'}, {'id': 17295, 'synset': 'tosser.n.02', 'name': 'tosser'}, {'id': 17296, 'synset': 'tosser.n.01', 'name': 'tosser'}, {'id': 17297, 'synset': 'totalitarian.n.01', 'name': 'totalitarian'}, {'id': 17298, 'synset': 'tourist.n.01', 'name': 'tourist'}, {'id': 17299, 'synset': 'tout.n.02', 'name': 'tout'}, {'id': 17300, 'synset': 'tout.n.01', 'name': 'tout'}, {'id': 17301, 'synset': 'tovarich.n.01', 'name': 'tovarich'}, {'id': 17302, 'synset': 'towhead.n.01', 'name': 'towhead'}, {'id': 17303, 'synset': 'town_clerk.n.01', 'name': 'town_clerk'}, {'id': 17304, 'synset': 'town_crier.n.01', 'name': 'town_crier'}, {'id': 17305, 'synset': 'townsman.n.02', 'name': 'townsman'}, {'id': 17306, 'synset': 'toxicologist.n.01', 'name': 'toxicologist'}, {'id': 17307, 'synset': 'track_star.n.01', 'name': 'track_star'}, {'id': 17308, 'synset': 'trader.n.01', 'name': 'trader'}, {'id': 17309, 'synset': 'trade_unionist.n.01', 'name': 'trade_unionist'}, {'id': 17310, 'synset': 'traditionalist.n.01', 'name': 'traditionalist'}, {'id': 17311, 'synset': 'traffic_cop.n.01', 'name': 'traffic_cop'}, {'id': 17312, 'synset': 'tragedian.n.02', 'name': 'tragedian'}, {'id': 17313, 'synset': 'tragedian.n.01', 'name': 'tragedian'}, {'id': 17314, 'synset': 'tragedienne.n.01', 'name': 'tragedienne'}, {'id': 17315, 'synset': 'trail_boss.n.01', 'name': 'trail_boss'}, {'id': 17316, 'synset': 'trainer.n.01', 'name': 'trainer'}, {'id': 17317, 'synset': 'traitor.n.01', 'name': 'traitor'}, {'id': 17318, 'synset': 'traitress.n.01', 'name': 'traitress'}, {'id': 17319, 'synset': 'transactor.n.01', 'name': 'transactor'}, {'id': 17320, 'synset': 'transcriber.n.03', 'name': 'transcriber'}, {'id': 17321, 'synset': 'transfer.n.02', 'name': 'transfer'}, {'id': 17322, 'synset': 'transferee.n.01', 'name': 'transferee'}, {'id': 17323, 'synset': 'translator.n.01', 'name': 'translator'}, {'id': 17324, 'synset': 'transvestite.n.01', 'name': 'transvestite'}, {'id': 17325, 'synset': 'traveling_salesman.n.01', 'name': 'traveling_salesman'}, {'id': 17326, 'synset': 'traverser.n.01', 'name': 'traverser'}, {'id': 17327, 'synset': 'trawler.n.01', 'name': 
'trawler'}, {'id': 17328, 'synset': 'treasury.n.04', 'name': 'Treasury'}, {'id': 17329, 'synset': 'trencher.n.01', 'name': 'trencher'}, {'id': 17330, 'synset': 'trend-setter.n.01', 'name': 'trend-setter'}, {'id': 17331, 'synset': 'tribesman.n.01', 'name': 'tribesman'}, {'id': 17332, 'synset': 'trier.n.02', 'name': 'trier'}, {'id': 17333, 'synset': 'trifler.n.01', 'name': 'trifler'}, {'id': 17334, 'synset': 'trooper.n.02', 'name': 'trooper'}, {'id': 17335, 'synset': 'trooper.n.03', 'name': 'trooper'}, {'id': 17336, 'synset': 'trotskyite.n.01', 'name': 'Trotskyite'}, {'id': 17337, 'synset': 'truant.n.01', 'name': 'truant'}, {'id': 17338, 'synset': 'trumpeter.n.01', 'name': 'trumpeter'}, {'id': 17339, 'synset': 'trusty.n.01', 'name': 'trusty'}, {'id': 17340, 'synset': 'tudor.n.03', 'name': 'Tudor'}, {'id': 17341, 'synset': 'tumbler.n.01', 'name': 'tumbler'}, {'id': 17342, 'synset': 'tutee.n.01', 'name': 'tutee'}, {'id': 17343, 'synset': 'twin.n.01', 'name': 'twin'}, {'id': 17344, 'synset': 'two-timer.n.01', 'name': 'two-timer'}, {'id': 17345, 'synset': 'tyke.n.01', 'name': 'Tyke'}, {'id': 17346, 'synset': 'tympanist.n.01', 'name': 'tympanist'}, {'id': 17347, 'synset': 'typist.n.01', 'name': 'typist'}, {'id': 17348, 'synset': 'tyrant.n.01', 'name': 'tyrant'}, {'id': 17349, 'synset': 'umpire.n.01', 'name': 'umpire'}, {'id': 17350, 'synset': 'understudy.n.01', 'name': 'understudy'}, {'id': 17351, 'synset': 'undesirable.n.01', 'name': 'undesirable'}, {'id': 17352, 'synset': 'unicyclist.n.01', 'name': 'unicyclist'}, {'id': 17353, 'synset': 'unilateralist.n.01', 'name': 'unilateralist'}, {'id': 17354, 'synset': 'unitarian.n.01', 'name': 'Unitarian'}, {'id': 17355, 'synset': 'arminian.n.01', 'name': 'Arminian'}, {'id': 17356, 'synset': 'universal_donor.n.01', 'name': 'universal_donor'}, {'id': 17357, 'synset': 'unix_guru.n.01', 'name': 'UNIX_guru'}, {'id': 17358, 'synset': 'unknown_soldier.n.01', 'name': 'Unknown_Soldier'}, {'id': 17359, 'synset': 'upsetter.n.01', 'name': 'upsetter'}, {'id': 17360, 'synset': 'upstager.n.01', 'name': 'upstager'}, {'id': 17361, 'synset': 'upstart.n.02', 'name': 'upstart'}, {'id': 17362, 'synset': 'upstart.n.01', 'name': 'upstart'}, {'id': 17363, 'synset': 'urchin.n.01', 'name': 'urchin'}, {'id': 17364, 'synset': 'urologist.n.01', 'name': 'urologist'}, {'id': 17365, 'synset': 'usherette.n.01', 'name': 'usherette'}, {'id': 17366, 'synset': 'usher.n.02', 'name': 'usher'}, {'id': 17367, 'synset': 'usurper.n.01', 'name': 'usurper'}, {'id': 17368, 'synset': 'utility_man.n.01', 'name': 'utility_man'}, {'id': 17369, 'synset': 'utilizer.n.01', 'name': 'utilizer'}, {'id': 17370, 'synset': 'utopian.n.01', 'name': 'Utopian'}, {'id': 17371, 'synset': 'uxoricide.n.01', 'name': 'uxoricide'}, {'id': 17372, 'synset': 'vacationer.n.01', 'name': 'vacationer'}, {'id': 17373, 'synset': 'valedictorian.n.01', 'name': 'valedictorian'}, {'id': 17374, 'synset': 'valley_girl.n.01', 'name': 'valley_girl'}, {'id': 17375, 'synset': 'vaulter.n.01', 'name': 'vaulter'}, {'id': 17376, 'synset': 'vegetarian.n.01', 'name': 'vegetarian'}, {'id': 17377, 'synset': 'vegan.n.01', 'name': 'vegan'}, {'id': 17378, 'synset': 'venerator.n.01', 'name': 'venerator'}, {'id': 17379, 'synset': 'venture_capitalist.n.01', 'name': 'venture_capitalist'}, {'id': 17380, 'synset': 'venturer.n.01', 'name': 'venturer'}, {'id': 17381, 'synset': 'vermin.n.01', 'name': 'vermin'}, {'id': 17382, 'synset': 'very_important_person.n.01', 'name': 'very_important_person'}, {'id': 17383, 'synset': 'vibist.n.01', 'name': 'vibist'}, {'id': 
17384, 'synset': 'vicar.n.01', 'name': 'vicar'}, {'id': 17385, 'synset': 'vicar.n.03', 'name': 'vicar'}, {'id': 17386, 'synset': 'vicar-general.n.01', 'name': 'vicar-general'}, {'id': 17387, 'synset': 'vice_chancellor.n.01', 'name': 'vice_chancellor'}, {'id': 17388, 'synset': 'vicegerent.n.01', 'name': 'vicegerent'}, {'id': 17389, 'synset': 'vice_president.n.01', 'name': 'vice_president'}, {'id': 17390, 'synset': 'vice-regent.n.01', 'name': 'vice-regent'}, {'id': 17391, 'synset': 'victim.n.02', 'name': 'victim'}, {'id': 17392, 'synset': 'victorian.n.01', 'name': 'Victorian'}, {'id': 17393, 'synset': 'victualer.n.01', 'name': 'victualer'}, {'id': 17394, 'synset': 'vigilante.n.01', 'name': 'vigilante'}, {'id': 17395, 'synset': 'villager.n.01', 'name': 'villager'}, {'id': 17396, 'synset': 'vintager.n.01', 'name': 'vintager'}, {'id': 17397, 'synset': 'vintner.n.01', 'name': 'vintner'}, {'id': 17398, 'synset': 'violator.n.02', 'name': 'violator'}, {'id': 17399, 'synset': 'violator.n.01', 'name': 'violator'}, {'id': 17400, 'synset': 'violist.n.01', 'name': 'violist'}, {'id': 17401, 'synset': 'virago.n.01', 'name': 'virago'}, {'id': 17402, 'synset': 'virologist.n.01', 'name': 'virologist'}, {'id': 17403, 'synset': 'visayan.n.01', 'name': 'Visayan'}, {'id': 17404, 'synset': 'viscountess.n.01', 'name': 'viscountess'}, {'id': 17405, 'synset': 'viscount.n.01', 'name': 'viscount'}, {'id': 17406, 'synset': 'visigoth.n.01', 'name': 'Visigoth'}, {'id': 17407, 'synset': 'visionary.n.01', 'name': 'visionary'}, {'id': 17408, 'synset': 'visiting_fireman.n.01', 'name': 'visiting_fireman'}, {'id': 17409, 'synset': 'visiting_professor.n.01', 'name': 'visiting_professor'}, {'id': 17410, 'synset': 'visualizer.n.01', 'name': 'visualizer'}, {'id': 17411, 'synset': 'vixen.n.01', 'name': 'vixen'}, {'id': 17412, 'synset': 'vizier.n.01', 'name': 'vizier'}, {'id': 17413, 'synset': 'voicer.n.01', 'name': 'voicer'}, {'id': 17414, 'synset': 'volunteer.n.02', 'name': 'volunteer'}, {'id': 17415, 'synset': 'volunteer.n.01', 'name': 'volunteer'}, {'id': 17416, 'synset': 'votary.n.02', 'name': 'votary'}, {'id': 17417, 'synset': 'votary.n.01', 'name': 'votary'}, {'id': 17418, 'synset': 'vouchee.n.01', 'name': 'vouchee'}, {'id': 17419, 'synset': 'vower.n.01', 'name': 'vower'}, {'id': 17420, 'synset': 'voyager.n.01', 'name': 'voyager'}, {'id': 17421, 'synset': 'voyeur.n.01', 'name': 'voyeur'}, {'id': 17422, 'synset': 'vulcanizer.n.01', 'name': 'vulcanizer'}, {'id': 17423, 'synset': 'waffler.n.01', 'name': 'waffler'}, {'id': 17424, 'synset': 'wagnerian.n.01', 'name': 'Wagnerian'}, {'id': 17425, 'synset': 'waif.n.01', 'name': 'waif'}, {'id': 17426, 'synset': 'wailer.n.01', 'name': 'wailer'}, {'id': 17427, 'synset': 'waiter.n.01', 'name': 'waiter'}, {'id': 17428, 'synset': 'waitress.n.01', 'name': 'waitress'}, {'id': 17429, 'synset': 'walking_delegate.n.01', 'name': 'walking_delegate'}, {'id': 17430, 'synset': 'walk-on.n.01', 'name': 'walk-on'}, {'id': 17431, 'synset': 'wallah.n.01', 'name': 'wallah'}, {'id': 17432, 'synset': 'wally.n.01', 'name': 'wally'}, {'id': 17433, 'synset': 'waltzer.n.01', 'name': 'waltzer'}, {'id': 17434, 'synset': 'wanderer.n.01', 'name': 'wanderer'}, {'id': 17435, 'synset': 'wandering_jew.n.01', 'name': 'Wandering_Jew'}, {'id': 17436, 'synset': 'wanton.n.01', 'name': 'wanton'}, {'id': 17437, 'synset': 'warrantee.n.02', 'name': 'warrantee'}, {'id': 17438, 'synset': 'warrantee.n.01', 'name': 'warrantee'}, {'id': 17439, 'synset': 'washer.n.01', 'name': 'washer'}, {'id': 17440, 'synset': 'washerman.n.01', 
'name': 'washerman'}, {'id': 17441, 'synset': 'washwoman.n.01', 'name': 'washwoman'}, {'id': 17442, 'synset': 'wassailer.n.01', 'name': 'wassailer'}, {'id': 17443, 'synset': 'wastrel.n.01', 'name': 'wastrel'}, {'id': 17444, 'synset': 'wave.n.09', 'name': 'Wave'}, {'id': 17445, 'synset': 'weatherman.n.01', 'name': 'weatherman'}, {'id': 17446, 'synset': 'weekend_warrior.n.02', 'name': 'weekend_warrior'}, {'id': 17447, 'synset': 'weeder.n.01', 'name': 'weeder'}, {'id': 17448, 'synset': 'welder.n.01', 'name': 'welder'}, {'id': 17449, 'synset': 'welfare_case.n.01', 'name': 'welfare_case'}, {'id': 17450, 'synset': 'westerner.n.01', 'name': 'westerner'}, {'id': 17451, 'synset': 'west-sider.n.01', 'name': 'West-sider'}, {'id': 17452, 'synset': 'wetter.n.02', 'name': 'wetter'}, {'id': 17453, 'synset': 'whaler.n.01', 'name': 'whaler'}, {'id': 17454, 'synset': 'whig.n.02', 'name': 'Whig'}, {'id': 17455, 'synset': 'whiner.n.01', 'name': 'whiner'}, {'id': 17456, 'synset': 'whipper-in.n.01', 'name': 'whipper-in'}, {'id': 17457, 'synset': 'whisperer.n.01', 'name': 'whisperer'}, {'id': 17458, 'synset': 'whiteface.n.02', 'name': 'whiteface'}, {'id': 17459, 'synset': 'carmelite.n.01', 'name': 'Carmelite'}, {'id': 17460, 'synset': 'augustinian.n.01', 'name': 'Augustinian'}, {'id': 17461, 'synset': 'white_hope.n.01', 'name': 'white_hope'}, {'id': 17462, 'synset': 'white_supremacist.n.01', 'name': 'white_supremacist'}, {'id': 17463, 'synset': 'whoremaster.n.02', 'name': 'whoremaster'}, {'id': 17464, 'synset': 'whoremaster.n.01', 'name': 'whoremaster'}, {'id': 17465, 'synset': 'widow.n.01', 'name': 'widow'}, {'id': 17466, 'synset': 'wife.n.01', 'name': 'wife'}, {'id': 17467, 'synset': 'wiggler.n.01', 'name': 'wiggler'}, {'id': 17468, 'synset': 'wimp.n.01', 'name': 'wimp'}, {'id': 17469, 'synset': 'wing_commander.n.01', 'name': 'wing_commander'}, {'id': 17470, 'synset': 'winger.n.01', 'name': 'winger'}, {'id': 17471, 'synset': 'winner.n.02', 'name': 'winner'}, {'id': 17472, 'synset': 'winner.n.01', 'name': 'winner'}, {'id': 17473, 'synset': 'window_dresser.n.01', 'name': 'window_dresser'}, {'id': 17474, 'synset': 'winker.n.01', 'name': 'winker'}, {'id': 17475, 'synset': 'wiper.n.01', 'name': 'wiper'}, {'id': 17476, 'synset': 'wireman.n.01', 'name': 'wireman'}, {'id': 17477, 'synset': 'wise_guy.n.01', 'name': 'wise_guy'}, {'id': 17478, 'synset': 'witch_doctor.n.01', 'name': 'witch_doctor'}, {'id': 17479, 'synset': 'withdrawer.n.05', 'name': 'withdrawer'}, {'id': 17480, 'synset': 'withdrawer.n.01', 'name': 'withdrawer'}, {'id': 17481, 'synset': 'woman.n.01', 'name': 'woman'}, {'id': 17482, 'synset': 'woman.n.02', 'name': 'woman'}, {'id': 17483, 'synset': 'wonder_boy.n.01', 'name': 'wonder_boy'}, {'id': 17484, 'synset': 'wonderer.n.01', 'name': 'wonderer'}, {'id': 17485, 'synset': 'working_girl.n.01', 'name': 'working_girl'}, {'id': 17486, 'synset': 'workman.n.01', 'name': 'workman'}, {'id': 17487, 'synset': 'workmate.n.01', 'name': 'workmate'}, {'id': 17488, 'synset': 'worldling.n.01', 'name': 'worldling'}, {'id': 17489, 'synset': 'worshiper.n.01', 'name': 'worshiper'}, {'id': 17490, 'synset': 'worthy.n.01', 'name': 'worthy'}, {'id': 17491, 'synset': 'wrecker.n.01', 'name': 'wrecker'}, {'id': 17492, 'synset': 'wright.n.07', 'name': 'wright'}, {'id': 17493, 'synset': 'write-in_candidate.n.01', 'name': 'write-in_candidate'}, {'id': 17494, 'synset': 'writer.n.01', 'name': 'writer'}, {'id': 17495, 'synset': 'wykehamist.n.01', 'name': 'Wykehamist'}, {'id': 17496, 'synset': 'yakuza.n.01', 'name': 'yakuza'}, {'id': 
17497, 'synset': 'yard_bird.n.01', 'name': 'yard_bird'}, {'id': 17498, 'synset': 'yardie.n.01', 'name': 'yardie'}, {'id': 17499, 'synset': 'yardman.n.01', 'name': 'yardman'}, {'id': 17500, 'synset': 'yardmaster.n.01', 'name': 'yardmaster'}, {'id': 17501, 'synset': 'yenta.n.02', 'name': 'yenta'}, {'id': 17502, 'synset': 'yogi.n.02', 'name': 'yogi'}, {'id': 17503, 'synset': 'young_buck.n.01', 'name': 'young_buck'}, {'id': 17504, 'synset': 'young_turk.n.02', 'name': 'young_Turk'}, {'id': 17505, 'synset': 'young_turk.n.01', 'name': 'Young_Turk'}, {'id': 17506, 'synset': 'zionist.n.01', 'name': 'Zionist'}, {'id': 17507, 'synset': 'zoo_keeper.n.01', 'name': 'zoo_keeper'}, {'id': 17508, 'synset': 'genet.n.01', 'name': 'Genet'}, {'id': 17509, 'synset': 'kennan.n.01', 'name': 'Kennan'}, {'id': 17510, 'synset': 'munro.n.01', 'name': 'Munro'}, {'id': 17511, 'synset': 'popper.n.01', 'name': 'Popper'}, {'id': 17512, 'synset': 'stoker.n.01', 'name': 'Stoker'}, {'id': 17513, 'synset': 'townes.n.01', 'name': 'Townes'}, {'id': 17514, 'synset': 'dust_storm.n.01', 'name': 'dust_storm'}, {'id': 17515, 'synset': 'parhelion.n.01', 'name': 'parhelion'}, {'id': 17516, 'synset': 'snow.n.01', 'name': 'snow'}, {'id': 17517, 'synset': 'facula.n.01', 'name': 'facula'}, {'id': 17518, 'synset': 'wave.n.08', 'name': 'wave'}, {'id': 17519, 'synset': 'microflora.n.01', 'name': 'microflora'}, {'id': 17520, 'synset': 'wilding.n.01', 'name': 'wilding'}, {'id': 17521, 'synset': 'semi-climber.n.01', 'name': 'semi-climber'}, {'id': 17522, 'synset': 'volva.n.01', 'name': 'volva'}, {'id': 17523, 'synset': 'basidiocarp.n.01', 'name': 'basidiocarp'}, {'id': 17524, 'synset': 'domatium.n.01', 'name': 'domatium'}, {'id': 17525, 'synset': 'apomict.n.01', 'name': 'apomict'}, {'id': 17526, 'synset': 'aquatic.n.01', 'name': 'aquatic'}, {'id': 17527, 'synset': 'bryophyte.n.01', 'name': 'bryophyte'}, {'id': 17528, 'synset': 'acrocarp.n.01', 'name': 'acrocarp'}, {'id': 17529, 'synset': 'sphagnum.n.01', 'name': 'sphagnum'}, {'id': 17530, 'synset': 'liverwort.n.01', 'name': 'liverwort'}, {'id': 17531, 'synset': 'hepatica.n.02', 'name': 'hepatica'}, {'id': 17532, 'synset': 'pecopteris.n.01', 'name': 'pecopteris'}, {'id': 17533, 'synset': 'pteridophyte.n.01', 'name': 'pteridophyte'}, {'id': 17534, 'synset': 'fern.n.01', 'name': 'fern'}, {'id': 17535, 'synset': 'fern_ally.n.01', 'name': 'fern_ally'}, {'id': 17536, 'synset': 'spore.n.01', 'name': 'spore'}, {'id': 17537, 'synset': 'carpospore.n.01', 'name': 'carpospore'}, {'id': 17538, 'synset': 'chlamydospore.n.01', 'name': 'chlamydospore'}, {'id': 17539, 'synset': 'conidium.n.01', 'name': 'conidium'}, {'id': 17540, 'synset': 'oospore.n.01', 'name': 'oospore'}, {'id': 17541, 'synset': 'tetraspore.n.01', 'name': 'tetraspore'}, {'id': 17542, 'synset': 'zoospore.n.01', 'name': 'zoospore'}, {'id': 17543, 'synset': 'cryptogam.n.01', 'name': 'cryptogam'}, {'id': 17544, 'synset': 'spermatophyte.n.01', 'name': 'spermatophyte'}, {'id': 17545, 'synset': 'seedling.n.01', 'name': 'seedling'}, {'id': 17546, 'synset': 'annual.n.01', 'name': 'annual'}, {'id': 17547, 'synset': 'biennial.n.01', 'name': 'biennial'}, {'id': 17548, 'synset': 'perennial.n.01', 'name': 'perennial'}, {'id': 17549, 'synset': 'hygrophyte.n.01', 'name': 'hygrophyte'}, {'id': 17550, 'synset': 'gymnosperm.n.01', 'name': 'gymnosperm'}, {'id': 17551, 'synset': 'gnetum.n.01', 'name': 'gnetum'}, {'id': 17552, 'synset': 'catha_edulis.n.01', 'name': 'Catha_edulis'}, {'id': 17553, 'synset': 'ephedra.n.01', 'name': 'ephedra'}, {'id': 17554, 'synset': 
'mahuang.n.01', 'name': 'mahuang'}, {'id': 17555, 'synset': 'welwitschia.n.01', 'name': 'welwitschia'}, {'id': 17556, 'synset': 'cycad.n.01', 'name': 'cycad'}, {'id': 17557, 'synset': 'sago_palm.n.02', 'name': 'sago_palm'}, {'id': 17558, 'synset': 'false_sago.n.01', 'name': 'false_sago'}, {'id': 17559, 'synset': 'zamia.n.01', 'name': 'zamia'}, {'id': 17560, 'synset': 'coontie.n.01', 'name': 'coontie'}, {'id': 17561, 'synset': 'ceratozamia.n.01', 'name': 'ceratozamia'}, {'id': 17562, 'synset': 'dioon.n.01', 'name': 'dioon'}, {'id': 17563, 'synset': 'encephalartos.n.01', 'name': 'encephalartos'}, {'id': 17564, 'synset': 'kaffir_bread.n.01', 'name': 'kaffir_bread'}, {'id': 17565, 'synset': 'macrozamia.n.01', 'name': 'macrozamia'}, {'id': 17566, 'synset': 'burrawong.n.01', 'name': 'burrawong'}, {'id': 17567, 'synset': 'pine.n.01', 'name': 'pine'}, {'id': 17568, 'synset': 'pinon.n.01', 'name': 'pinon'}, {'id': 17569, 'synset': 'nut_pine.n.01', 'name': 'nut_pine'}, {'id': 17570, 'synset': 'pinon_pine.n.01', 'name': 'pinon_pine'}, {'id': 17571, 'synset': 'rocky_mountain_pinon.n.01', 'name': 'Rocky_mountain_pinon'}, {'id': 17572, 'synset': 'single-leaf.n.01', 'name': 'single-leaf'}, {'id': 17573, 'synset': 'bishop_pine.n.01', 'name': 'bishop_pine'}, {'id': 17574, 'synset': 'california_single-leaf_pinyon.n.01', 'name': 'California_single-leaf_pinyon'}, {'id': 17575, 'synset': "parry's_pinyon.n.01", 'name': "Parry's_pinyon"}, {'id': 17576, 'synset': 'spruce_pine.n.04', 'name': 'spruce_pine'}, {'id': 17577, 'synset': 'black_pine.n.05', 'name': 'black_pine'}, {'id': 17578, 'synset': 'pitch_pine.n.02', 'name': 'pitch_pine'}, {'id': 17579, 'synset': 'pond_pine.n.01', 'name': 'pond_pine'}, {'id': 17580, 'synset': 'stone_pine.n.01', 'name': 'stone_pine'}, {'id': 17581, 'synset': 'swiss_pine.n.01', 'name': 'Swiss_pine'}, {'id': 17582, 'synset': 'cembra_nut.n.01', 'name': 'cembra_nut'}, {'id': 17583, 'synset': 'swiss_mountain_pine.n.01', 'name': 'Swiss_mountain_pine'}, {'id': 17584, 'synset': 'ancient_pine.n.01', 'name': 'ancient_pine'}, {'id': 17585, 'synset': 'white_pine.n.01', 'name': 'white_pine'}, {'id': 17586, 'synset': 'american_white_pine.n.01', 'name': 'American_white_pine'}, {'id': 17587, 'synset': 'western_white_pine.n.01', 'name': 'western_white_pine'}, {'id': 17588, 'synset': 'southwestern_white_pine.n.01', 'name': 'southwestern_white_pine'}, {'id': 17589, 'synset': 'limber_pine.n.01', 'name': 'limber_pine'}, {'id': 17590, 'synset': 'whitebark_pine.n.01', 'name': 'whitebark_pine'}, {'id': 17591, 'synset': 'yellow_pine.n.01', 'name': 'yellow_pine'}, {'id': 17592, 'synset': 'ponderosa.n.01', 'name': 'ponderosa'}, {'id': 17593, 'synset': 'jeffrey_pine.n.01', 'name': 'Jeffrey_pine'}, {'id': 17594, 'synset': 'shore_pine.n.01', 'name': 'shore_pine'}, {'id': 17595, 'synset': 'sierra_lodgepole_pine.n.01', 'name': 'Sierra_lodgepole_pine'}, {'id': 17596, 'synset': 'loblolly_pine.n.01', 'name': 'loblolly_pine'}, {'id': 17597, 'synset': 'jack_pine.n.01', 'name': 'jack_pine'}, {'id': 17598, 'synset': 'swamp_pine.n.01', 'name': 'swamp_pine'}, {'id': 17599, 'synset': 'longleaf_pine.n.01', 'name': 'longleaf_pine'}, {'id': 17600, 'synset': 'shortleaf_pine.n.01', 'name': 'shortleaf_pine'}, {'id': 17601, 'synset': 'red_pine.n.02', 'name': 'red_pine'}, {'id': 17602, 'synset': 'scotch_pine.n.01', 'name': 'Scotch_pine'}, {'id': 17603, 'synset': 'scrub_pine.n.01', 'name': 'scrub_pine'}, {'id': 17604, 'synset': 'monterey_pine.n.01', 'name': 'Monterey_pine'}, {'id': 17605, 'synset': 'bristlecone_pine.n.01', 'name': 
'bristlecone_pine'}, {'id': 17606, 'synset': 'table-mountain_pine.n.01', 'name': 'table-mountain_pine'}, {'id': 17607, 'synset': 'knobcone_pine.n.01', 'name': 'knobcone_pine'}, {'id': 17608, 'synset': 'japanese_red_pine.n.01', 'name': 'Japanese_red_pine'}, {'id': 17609, 'synset': 'japanese_black_pine.n.01', 'name': 'Japanese_black_pine'}, {'id': 17610, 'synset': 'torrey_pine.n.01', 'name': 'Torrey_pine'}, {'id': 17611, 'synset': 'larch.n.02', 'name': 'larch'}, {'id': 17612, 'synset': 'american_larch.n.01', 'name': 'American_larch'}, {'id': 17613, 'synset': 'western_larch.n.01', 'name': 'western_larch'}, {'id': 17614, 'synset': 'subalpine_larch.n.01', 'name': 'subalpine_larch'}, {'id': 17615, 'synset': 'european_larch.n.01', 'name': 'European_larch'}, {'id': 17616, 'synset': 'siberian_larch.n.01', 'name': 'Siberian_larch'}, {'id': 17617, 'synset': 'golden_larch.n.01', 'name': 'golden_larch'}, {'id': 17618, 'synset': 'fir.n.02', 'name': 'fir'}, {'id': 17619, 'synset': 'silver_fir.n.01', 'name': 'silver_fir'}, {'id': 17620, 'synset': 'amabilis_fir.n.01', 'name': 'amabilis_fir'}, {'id': 17621, 'synset': 'european_silver_fir.n.01', 'name': 'European_silver_fir'}, {'id': 17622, 'synset': 'white_fir.n.01', 'name': 'white_fir'}, {'id': 17623, 'synset': 'balsam_fir.n.01', 'name': 'balsam_fir'}, {'id': 17624, 'synset': 'fraser_fir.n.01', 'name': 'Fraser_fir'}, {'id': 17625, 'synset': 'lowland_fir.n.01', 'name': 'lowland_fir'}, {'id': 17626, 'synset': 'alpine_fir.n.01', 'name': 'Alpine_fir'}, {'id': 17627, 'synset': 'santa_lucia_fir.n.01', 'name': 'Santa_Lucia_fir'}, {'id': 17628, 'synset': 'cedar.n.03', 'name': 'cedar'}, {'id': 17629, 'synset': 'cedar_of_lebanon.n.01', 'name': 'cedar_of_Lebanon'}, {'id': 17630, 'synset': 'deodar.n.01', 'name': 'deodar'}, {'id': 17631, 'synset': 'atlas_cedar.n.01', 'name': 'Atlas_cedar'}, {'id': 17632, 'synset': 'spruce.n.02', 'name': 'spruce'}, {'id': 17633, 'synset': 'norway_spruce.n.01', 'name': 'Norway_spruce'}, {'id': 17634, 'synset': 'weeping_spruce.n.01', 'name': 'weeping_spruce'}, {'id': 17635, 'synset': 'engelmann_spruce.n.01', 'name': 'Engelmann_spruce'}, {'id': 17636, 'synset': 'white_spruce.n.01', 'name': 'white_spruce'}, {'id': 17637, 'synset': 'black_spruce.n.01', 'name': 'black_spruce'}, {'id': 17638, 'synset': 'siberian_spruce.n.01', 'name': 'Siberian_spruce'}, {'id': 17639, 'synset': 'sitka_spruce.n.01', 'name': 'Sitka_spruce'}, {'id': 17640, 'synset': 'oriental_spruce.n.01', 'name': 'oriental_spruce'}, {'id': 17641, 'synset': 'colorado_spruce.n.01', 'name': 'Colorado_spruce'}, {'id': 17642, 'synset': 'red_spruce.n.01', 'name': 'red_spruce'}, {'id': 17643, 'synset': 'hemlock.n.04', 'name': 'hemlock'}, {'id': 17644, 'synset': 'eastern_hemlock.n.01', 'name': 'eastern_hemlock'}, {'id': 17645, 'synset': 'carolina_hemlock.n.01', 'name': 'Carolina_hemlock'}, {'id': 17646, 'synset': 'mountain_hemlock.n.01', 'name': 'mountain_hemlock'}, {'id': 17647, 'synset': 'western_hemlock.n.01', 'name': 'western_hemlock'}, {'id': 17648, 'synset': 'douglas_fir.n.02', 'name': 'douglas_fir'}, {'id': 17649, 'synset': 'green_douglas_fir.n.01', 'name': 'green_douglas_fir'}, {'id': 17650, 'synset': 'big-cone_spruce.n.01', 'name': 'big-cone_spruce'}, {'id': 17651, 'synset': 'cathaya.n.01', 'name': 'Cathaya'}, {'id': 17652, 'synset': 'cedar.n.01', 'name': 'cedar'}, {'id': 17653, 'synset': 'cypress.n.02', 'name': 'cypress'}, {'id': 17654, 'synset': 'gowen_cypress.n.01', 'name': 'gowen_cypress'}, {'id': 17655, 'synset': 'pygmy_cypress.n.01', 'name': 'pygmy_cypress'}, {'id': 17656, 
'synset': 'santa_cruz_cypress.n.01', 'name': 'Santa_Cruz_cypress'}, {'id': 17657, 'synset': 'arizona_cypress.n.01', 'name': 'Arizona_cypress'}, {'id': 17658, 'synset': 'guadalupe_cypress.n.01', 'name': 'Guadalupe_cypress'}, {'id': 17659, 'synset': 'monterey_cypress.n.01', 'name': 'Monterey_cypress'}, {'id': 17660, 'synset': 'mexican_cypress.n.01', 'name': 'Mexican_cypress'}, {'id': 17661, 'synset': 'italian_cypress.n.01', 'name': 'Italian_cypress'}, {'id': 17662, 'synset': 'king_william_pine.n.01', 'name': 'King_William_pine'}, {'id': 17663, 'synset': 'chilean_cedar.n.01', 'name': 'Chilean_cedar'}, {'id': 17664, 'synset': 'incense_cedar.n.02', 'name': 'incense_cedar'}, {'id': 17665, 'synset': 'southern_white_cedar.n.01', 'name': 'southern_white_cedar'}, {'id': 17666, 'synset': 'oregon_cedar.n.01', 'name': 'Oregon_cedar'}, {'id': 17667, 'synset': 'yellow_cypress.n.01', 'name': 'yellow_cypress'}, {'id': 17668, 'synset': 'japanese_cedar.n.01', 'name': 'Japanese_cedar'}, {'id': 17669, 'synset': 'juniper_berry.n.01', 'name': 'juniper_berry'}, {'id': 17670, 'synset': 'incense_cedar.n.01', 'name': 'incense_cedar'}, {'id': 17671, 'synset': 'kawaka.n.01', 'name': 'kawaka'}, {'id': 17672, 'synset': 'pahautea.n.01', 'name': 'pahautea'}, {'id': 17673, 'synset': 'metasequoia.n.01', 'name': 'metasequoia'}, {'id': 17674, 'synset': 'arborvitae.n.01', 'name': 'arborvitae'}, {'id': 17675, 'synset': 'western_red_cedar.n.01', 'name': 'western_red_cedar'}, {'id': 17676, 'synset': 'american_arborvitae.n.01', 'name': 'American_arborvitae'}, {'id': 17677, 'synset': 'oriental_arborvitae.n.01', 'name': 'Oriental_arborvitae'}, {'id': 17678, 'synset': 'hiba_arborvitae.n.01', 'name': 'hiba_arborvitae'}, {'id': 17679, 'synset': 'keteleeria.n.01', 'name': 'keteleeria'}, {'id': 17680, 'synset': 'wollemi_pine.n.01', 'name': 'Wollemi_pine'}, {'id': 17681, 'synset': 'araucaria.n.01', 'name': 'araucaria'}, {'id': 17682, 'synset': 'monkey_puzzle.n.01', 'name': 'monkey_puzzle'}, {'id': 17683, 'synset': 'norfolk_island_pine.n.01', 'name': 'norfolk_island_pine'}, {'id': 17684, 'synset': 'new_caledonian_pine.n.01', 'name': 'new_caledonian_pine'}, {'id': 17685, 'synset': 'bunya_bunya.n.01', 'name': 'bunya_bunya'}, {'id': 17686, 'synset': 'hoop_pine.n.01', 'name': 'hoop_pine'}, {'id': 17687, 'synset': 'kauri_pine.n.01', 'name': 'kauri_pine'}, {'id': 17688, 'synset': 'kauri.n.02', 'name': 'kauri'}, {'id': 17689, 'synset': 'amboina_pine.n.01', 'name': 'amboina_pine'}, {'id': 17690, 'synset': 'dundathu_pine.n.01', 'name': 'dundathu_pine'}, {'id': 17691, 'synset': 'red_kauri.n.01', 'name': 'red_kauri'}, {'id': 17692, 'synset': 'plum-yew.n.01', 'name': 'plum-yew'}, {'id': 17693, 'synset': 'california_nutmeg.n.01', 'name': 'California_nutmeg'}, {'id': 17694, 'synset': 'stinking_cedar.n.01', 'name': 'stinking_cedar'}, {'id': 17695, 'synset': 'celery_pine.n.01', 'name': 'celery_pine'}, {'id': 17696, 'synset': 'celery_top_pine.n.01', 'name': 'celery_top_pine'}, {'id': 17697, 'synset': 'tanekaha.n.01', 'name': 'tanekaha'}, {'id': 17698, 'synset': 'alpine_celery_pine.n.01', 'name': 'Alpine_celery_pine'}, {'id': 17699, 'synset': 'yellowwood.n.02', 'name': 'yellowwood'}, {'id': 17700, 'synset': 'gymnospermous_yellowwood.n.01', 'name': 'gymnospermous_yellowwood'}, {'id': 17701, 'synset': 'podocarp.n.01', 'name': 'podocarp'}, {'id': 17702, 'synset': 'yacca.n.01', 'name': 'yacca'}, {'id': 17703, 'synset': 'brown_pine.n.01', 'name': 'brown_pine'}, {'id': 17704, 'synset': 'cape_yellowwood.n.01', 'name': 'cape_yellowwood'}, {'id': 17705, 'synset': 
'south-african_yellowwood.n.01', 'name': 'South-African_yellowwood'}, {'id': 17706, 'synset': 'alpine_totara.n.01', 'name': 'alpine_totara'}, {'id': 17707, 'synset': 'totara.n.01', 'name': 'totara'}, {'id': 17708, 'synset': 'common_yellowwood.n.01', 'name': 'common_yellowwood'}, {'id': 17709, 'synset': 'kahikatea.n.01', 'name': 'kahikatea'}, {'id': 17710, 'synset': 'rimu.n.01', 'name': 'rimu'}, {'id': 17711, 'synset': 'tarwood.n.02', 'name': 'tarwood'}, {'id': 17712, 'synset': 'common_sickle_pine.n.01', 'name': 'common_sickle_pine'}, {'id': 17713, 'synset': 'yellow-leaf_sickle_pine.n.01', 'name': 'yellow-leaf_sickle_pine'}, {'id': 17714, 'synset': 'tarwood.n.01', 'name': 'tarwood'}, {'id': 17715, 'synset': 'westland_pine.n.01', 'name': 'westland_pine'}, {'id': 17716, 'synset': 'huon_pine.n.01', 'name': 'huon_pine'}, {'id': 17717, 'synset': 'chilean_rimu.n.01', 'name': 'Chilean_rimu'}, {'id': 17718, 'synset': 'mountain_rimu.n.01', 'name': 'mountain_rimu'}, {'id': 17719, 'synset': 'nagi.n.01', 'name': 'nagi'}, {'id': 17720, 'synset': 'miro.n.01', 'name': 'miro'}, {'id': 17721, 'synset': 'matai.n.01', 'name': 'matai'}, {'id': 17722, 'synset': 'plum-fruited_yew.n.01', 'name': 'plum-fruited_yew'}, {'id': 17723, 'synset': 'prince_albert_yew.n.01', 'name': 'Prince_Albert_yew'}, {'id': 17724, 'synset': 'sundacarpus_amara.n.01', 'name': 'Sundacarpus_amara'}, {'id': 17725, 'synset': 'japanese_umbrella_pine.n.01', 'name': 'Japanese_umbrella_pine'}, {'id': 17726, 'synset': 'yew.n.02', 'name': 'yew'}, {'id': 17727, 'synset': 'old_world_yew.n.01', 'name': 'Old_World_yew'}, {'id': 17728, 'synset': 'pacific_yew.n.01', 'name': 'Pacific_yew'}, {'id': 17729, 'synset': 'japanese_yew.n.01', 'name': 'Japanese_yew'}, {'id': 17730, 'synset': 'florida_yew.n.01', 'name': 'Florida_yew'}, {'id': 17731, 'synset': 'new_caledonian_yew.n.01', 'name': 'New_Caledonian_yew'}, {'id': 17732, 'synset': 'white-berry_yew.n.01', 'name': 'white-berry_yew'}, {'id': 17733, 'synset': 'ginkgo.n.01', 'name': 'ginkgo'}, {'id': 17734, 'synset': 'angiosperm.n.01', 'name': 'angiosperm'}, {'id': 17735, 'synset': 'dicot.n.01', 'name': 'dicot'}, {'id': 17736, 'synset': 'monocot.n.01', 'name': 'monocot'}, {'id': 17737, 'synset': 'floret.n.01', 'name': 'floret'}, {'id': 17738, 'synset': 'flower.n.01', 'name': 'flower'}, {'id': 17739, 'synset': 'bloomer.n.01', 'name': 'bloomer'}, {'id': 17740, 'synset': 'wildflower.n.01', 'name': 'wildflower'}, {'id': 17741, 'synset': 'apetalous_flower.n.01', 'name': 'apetalous_flower'}, {'id': 17742, 'synset': 'inflorescence.n.02', 'name': 'inflorescence'}, {'id': 17743, 'synset': 'rosebud.n.01', 'name': 'rosebud'}, {'id': 17744, 'synset': 'gynostegium.n.01', 'name': 'gynostegium'}, {'id': 17745, 'synset': 'pollinium.n.01', 'name': 'pollinium'}, {'id': 17746, 'synset': 'pistil.n.01', 'name': 'pistil'}, {'id': 17747, 'synset': 'gynobase.n.01', 'name': 'gynobase'}, {'id': 17748, 'synset': 'gynophore.n.01', 'name': 'gynophore'}, {'id': 17749, 'synset': 'stylopodium.n.01', 'name': 'stylopodium'}, {'id': 17750, 'synset': 'carpophore.n.01', 'name': 'carpophore'}, {'id': 17751, 'synset': 'cornstalk.n.01', 'name': 'cornstalk'}, {'id': 17752, 'synset': 'petiolule.n.01', 'name': 'petiolule'}, {'id': 17753, 'synset': 'mericarp.n.01', 'name': 'mericarp'}, {'id': 17754, 'synset': 'micropyle.n.01', 'name': 'micropyle'}, {'id': 17755, 'synset': 'germ_tube.n.01', 'name': 'germ_tube'}, {'id': 17756, 'synset': 'pollen_tube.n.01', 'name': 'pollen_tube'}, {'id': 17757, 'synset': 'gemma.n.01', 'name': 'gemma'}, {'id': 17758, 
'synset': 'galbulus.n.01', 'name': 'galbulus'}, {'id': 17759, 'synset': 'nectary.n.01', 'name': 'nectary'}, {'id': 17760, 'synset': 'pericarp.n.01', 'name': 'pericarp'}, {'id': 17761, 'synset': 'epicarp.n.01', 'name': 'epicarp'}, {'id': 17762, 'synset': 'mesocarp.n.01', 'name': 'mesocarp'}, {'id': 17763, 'synset': 'pip.n.03', 'name': 'pip'}, {'id': 17764, 'synset': 'silique.n.01', 'name': 'silique'}, {'id': 17765, 'synset': 'cataphyll.n.01', 'name': 'cataphyll'}, {'id': 17766, 'synset': 'perisperm.n.01', 'name': 'perisperm'}, {'id': 17767, 'synset': 'monocarp.n.01', 'name': 'monocarp'}, {'id': 17768, 'synset': 'sporophyte.n.01', 'name': 'sporophyte'}, {'id': 17769, 'synset': 'gametophyte.n.01', 'name': 'gametophyte'}, {'id': 17770, 'synset': 'megasporangium.n.01', 'name': 'megasporangium'}, {'id': 17771, 'synset': 'microspore.n.01', 'name': 'microspore'}, {'id': 17772, 'synset': 'microsporangium.n.01', 'name': 'microsporangium'}, {'id': 17773, 'synset': 'microsporophyll.n.01', 'name': 'microsporophyll'}, {'id': 17774, 'synset': 'archespore.n.01', 'name': 'archespore'}, {'id': 17775, 'synset': 'bonduc_nut.n.01', 'name': 'bonduc_nut'}, {'id': 17776, 'synset': "job's_tears.n.01", 'name': "Job's_tears"}, {'id': 17777, 'synset': 'oilseed.n.01', 'name': 'oilseed'}, {'id': 17778, 'synset': 'castor_bean.n.01', 'name': 'castor_bean'}, {'id': 17779, 'synset': 'cottonseed.n.01', 'name': 'cottonseed'}, {'id': 17780, 'synset': 'candlenut.n.02', 'name': 'candlenut'}, {'id': 17781, 'synset': 'peach_pit.n.01', 'name': 'peach_pit'}, {'id': 17782, 'synset': 'hypanthium.n.01', 'name': 'hypanthium'}, {'id': 17783, 'synset': 'petal.n.01', 'name': 'petal'}, {'id': 17784, 'synset': 'corolla.n.01', 'name': 'corolla'}, {'id': 17785, 'synset': 'lip.n.02', 'name': 'lip'}, {'id': 17786, 'synset': 'perianth.n.01', 'name': 'perianth'}, {'id': 17787, 'synset': 'thistledown.n.01', 'name': 'thistledown'}, {'id': 17788, 'synset': 'custard_apple.n.01', 'name': 'custard_apple'}, {'id': 17789, 'synset': 'cherimoya.n.01', 'name': 'cherimoya'}, {'id': 17790, 'synset': 'ilama.n.01', 'name': 'ilama'}, {'id': 17791, 'synset': 'soursop.n.01', 'name': 'soursop'}, {'id': 17792, 'synset': "bullock's_heart.n.01", 'name': "bullock's_heart"}, {'id': 17793, 'synset': 'sweetsop.n.01', 'name': 'sweetsop'}, {'id': 17794, 'synset': 'pond_apple.n.01', 'name': 'pond_apple'}, {'id': 17795, 'synset': 'pawpaw.n.02', 'name': 'pawpaw'}, {'id': 17796, 'synset': 'ilang-ilang.n.02', 'name': 'ilang-ilang'}, {'id': 17797, 'synset': 'lancewood.n.02', 'name': 'lancewood'}, {'id': 17798, 'synset': 'guinea_pepper.n.02', 'name': 'Guinea_pepper'}, {'id': 17799, 'synset': 'barberry.n.01', 'name': 'barberry'}, {'id': 17800, 'synset': 'american_barberry.n.01', 'name': 'American_barberry'}, {'id': 17801, 'synset': 'common_barberry.n.01', 'name': 'common_barberry'}, {'id': 17802, 'synset': 'japanese_barberry.n.01', 'name': 'Japanese_barberry'}, {'id': 17803, 'synset': 'oregon_grape.n.02', 'name': 'Oregon_grape'}, {'id': 17804, 'synset': 'oregon_grape.n.01', 'name': 'Oregon_grape'}, {'id': 17805, 'synset': 'mayapple.n.01', 'name': 'mayapple'}, {'id': 17806, 'synset': 'may_apple.n.01', 'name': 'May_apple'}, {'id': 17807, 'synset': 'allspice.n.02', 'name': 'allspice'}, {'id': 17808, 'synset': 'carolina_allspice.n.01', 'name': 'Carolina_allspice'}, {'id': 17809, 'synset': 'spicebush.n.02', 'name': 'spicebush'}, {'id': 17810, 'synset': 'katsura_tree.n.01', 'name': 'katsura_tree'}, {'id': 17811, 'synset': 'laurel.n.01', 'name': 'laurel'}, {'id': 17812, 'synset': 
'true_laurel.n.01', 'name': 'true_laurel'}, {'id': 17813, 'synset': 'camphor_tree.n.01', 'name': 'camphor_tree'}, {'id': 17814, 'synset': 'cinnamon.n.02', 'name': 'cinnamon'}, {'id': 17815, 'synset': 'cassia.n.03', 'name': 'cassia'}, {'id': 17816, 'synset': 'cassia_bark.n.01', 'name': 'cassia_bark'}, {'id': 17817, 'synset': 'saigon_cinnamon.n.01', 'name': 'Saigon_cinnamon'}, {'id': 17818, 'synset': 'cinnamon_bark.n.01', 'name': 'cinnamon_bark'}, {'id': 17819, 'synset': 'spicebush.n.01', 'name': 'spicebush'}, {'id': 17820, 'synset': 'avocado.n.02', 'name': 'avocado'}, {'id': 17821, 'synset': 'laurel-tree.n.01', 'name': 'laurel-tree'}, {'id': 17822, 'synset': 'sassafras.n.01', 'name': 'sassafras'}, {'id': 17823, 'synset': 'california_laurel.n.01', 'name': 'California_laurel'}, {'id': 17824, 'synset': 'anise_tree.n.01', 'name': 'anise_tree'}, {'id': 17825, 'synset': 'purple_anise.n.01', 'name': 'purple_anise'}, {'id': 17826, 'synset': 'star_anise.n.02', 'name': 'star_anise'}, {'id': 17827, 'synset': 'star_anise.n.01', 'name': 'star_anise'}, {'id': 17828, 'synset': 'magnolia.n.02', 'name': 'magnolia'}, {'id': 17829, 'synset': 'southern_magnolia.n.01', 'name': 'southern_magnolia'}, {'id': 17830, 'synset': 'umbrella_tree.n.02', 'name': 'umbrella_tree'}, {'id': 17831, 'synset': 'earleaved_umbrella_tree.n.01', 'name': 'earleaved_umbrella_tree'}, {'id': 17832, 'synset': 'cucumber_tree.n.01', 'name': 'cucumber_tree'}, {'id': 17833, 'synset': 'large-leaved_magnolia.n.01', 'name': 'large-leaved_magnolia'}, {'id': 17834, 'synset': 'saucer_magnolia.n.01', 'name': 'saucer_magnolia'}, {'id': 17835, 'synset': 'star_magnolia.n.01', 'name': 'star_magnolia'}, {'id': 17836, 'synset': 'sweet_bay.n.01', 'name': 'sweet_bay'}, {'id': 17837, 'synset': 'manglietia.n.01', 'name': 'manglietia'}, {'id': 17838, 'synset': 'tulip_tree.n.01', 'name': 'tulip_tree'}, {'id': 17839, 'synset': 'moonseed.n.01', 'name': 'moonseed'}, {'id': 17840, 'synset': 'common_moonseed.n.01', 'name': 'common_moonseed'}, {'id': 17841, 'synset': 'carolina_moonseed.n.01', 'name': 'Carolina_moonseed'}, {'id': 17842, 'synset': 'nutmeg.n.01', 'name': 'nutmeg'}, {'id': 17843, 'synset': 'water_nymph.n.02', 'name': 'water_nymph'}, {'id': 17844, 'synset': 'european_white_lily.n.01', 'name': 'European_white_lily'}, {'id': 17845, 'synset': 'southern_spatterdock.n.01', 'name': 'southern_spatterdock'}, {'id': 17846, 'synset': 'lotus.n.01', 'name': 'lotus'}, {'id': 17847, 'synset': 'water_chinquapin.n.01', 'name': 'water_chinquapin'}, {'id': 17848, 'synset': 'water-shield.n.02', 'name': 'water-shield'}, {'id': 17849, 'synset': 'water-shield.n.01', 'name': 'water-shield'}, {'id': 17850, 'synset': 'peony.n.01', 'name': 'peony'}, {'id': 17851, 'synset': 'buttercup.n.01', 'name': 'buttercup'}, {'id': 17852, 'synset': 'meadow_buttercup.n.01', 'name': 'meadow_buttercup'}, {'id': 17853, 'synset': 'water_crowfoot.n.01', 'name': 'water_crowfoot'}, {'id': 17854, 'synset': 'lesser_celandine.n.01', 'name': 'lesser_celandine'}, {'id': 17855, 'synset': 'lesser_spearwort.n.01', 'name': 'lesser_spearwort'}, {'id': 17856, 'synset': 'greater_spearwort.n.01', 'name': 'greater_spearwort'}, {'id': 17857, 'synset': 'western_buttercup.n.01', 'name': 'western_buttercup'}, {'id': 17858, 'synset': 'creeping_buttercup.n.01', 'name': 'creeping_buttercup'}, {'id': 17859, 'synset': 'cursed_crowfoot.n.01', 'name': 'cursed_crowfoot'}, {'id': 17860, 'synset': 'aconite.n.01', 'name': 'aconite'}, {'id': 17861, 'synset': 'monkshood.n.01', 'name': 'monkshood'}, {'id': 17862, 'synset': 
'wolfsbane.n.01', 'name': 'wolfsbane'}, {'id': 17863, 'synset': 'baneberry.n.02', 'name': 'baneberry'}, {'id': 17864, 'synset': 'baneberry.n.01', 'name': 'baneberry'}, {'id': 17865, 'synset': 'red_baneberry.n.01', 'name': 'red_baneberry'}, {'id': 17866, 'synset': "pheasant's-eye.n.01", 'name': "pheasant's-eye"}, {'id': 17867, 'synset': 'anemone.n.01', 'name': 'anemone'}, {'id': 17868, 'synset': 'alpine_anemone.n.01', 'name': 'Alpine_anemone'}, {'id': 17869, 'synset': 'canada_anemone.n.01', 'name': 'Canada_anemone'}, {'id': 17870, 'synset': 'thimbleweed.n.01', 'name': 'thimbleweed'}, {'id': 17871, 'synset': 'wood_anemone.n.02', 'name': 'wood_anemone'}, {'id': 17872, 'synset': 'wood_anemone.n.01', 'name': 'wood_anemone'}, {'id': 17873, 'synset': 'longheaded_thimbleweed.n.01', 'name': 'longheaded_thimbleweed'}, {'id': 17874, 'synset': 'snowdrop_anemone.n.01', 'name': 'snowdrop_anemone'}, {'id': 17875, 'synset': 'virginia_thimbleweed.n.01', 'name': 'Virginia_thimbleweed'}, {'id': 17876, 'synset': 'rue_anemone.n.01', 'name': 'rue_anemone'}, {'id': 17877, 'synset': 'columbine.n.01', 'name': 'columbine'}, {'id': 17878, 'synset': 'meeting_house.n.01', 'name': 'meeting_house'}, {'id': 17879, 'synset': 'blue_columbine.n.01', 'name': 'blue_columbine'}, {'id': 17880, 'synset': "granny's_bonnets.n.01", 'name': "granny's_bonnets"}, {'id': 17881, 'synset': 'marsh_marigold.n.01', 'name': 'marsh_marigold'}, {'id': 17882, 'synset': 'american_bugbane.n.01', 'name': 'American_bugbane'}, {'id': 17883, 'synset': 'black_cohosh.n.01', 'name': 'black_cohosh'}, {'id': 17884, 'synset': 'fetid_bugbane.n.01', 'name': 'fetid_bugbane'}, {'id': 17885, 'synset': 'clematis.n.01', 'name': 'clematis'}, {'id': 17886, 'synset': 'pine_hyacinth.n.01', 'name': 'pine_hyacinth'}, {'id': 17887, 'synset': 'blue_jasmine.n.01', 'name': 'blue_jasmine'}, {'id': 17888, 'synset': 'golden_clematis.n.01', 'name': 'golden_clematis'}, {'id': 17889, 'synset': 'scarlet_clematis.n.01', 'name': 'scarlet_clematis'}, {'id': 17890, 'synset': 'leather_flower.n.02', 'name': 'leather_flower'}, {'id': 17891, 'synset': 'leather_flower.n.01', 'name': 'leather_flower'}, {'id': 17892, 'synset': "virgin's_bower.n.01", 'name': "virgin's_bower"}, {'id': 17893, 'synset': 'purple_clematis.n.01', 'name': 'purple_clematis'}, {'id': 17894, 'synset': 'goldthread.n.01', 'name': 'goldthread'}, {'id': 17895, 'synset': 'rocket_larkspur.n.01', 'name': 'rocket_larkspur'}, {'id': 17896, 'synset': 'delphinium.n.01', 'name': 'delphinium'}, {'id': 17897, 'synset': 'larkspur.n.01', 'name': 'larkspur'}, {'id': 17898, 'synset': 'winter_aconite.n.01', 'name': 'winter_aconite'}, {'id': 17899, 'synset': 'lenten_rose.n.01', 'name': 'lenten_rose'}, {'id': 17900, 'synset': 'green_hellebore.n.01', 'name': 'green_hellebore'}, {'id': 17901, 'synset': 'hepatica.n.01', 'name': 'hepatica'}, {'id': 17902, 'synset': 'goldenseal.n.01', 'name': 'goldenseal'}, {'id': 17903, 'synset': 'false_rue_anemone.n.01', 'name': 'false_rue_anemone'}, {'id': 17904, 'synset': 'giant_buttercup.n.01', 'name': 'giant_buttercup'}, {'id': 17905, 'synset': 'nigella.n.01', 'name': 'nigella'}, {'id': 17906, 'synset': 'love-in-a-mist.n.03', 'name': 'love-in-a-mist'}, {'id': 17907, 'synset': 'fennel_flower.n.01', 'name': 'fennel_flower'}, {'id': 17908, 'synset': 'black_caraway.n.01', 'name': 'black_caraway'}, {'id': 17909, 'synset': 'pasqueflower.n.01', 'name': 'pasqueflower'}, {'id': 17910, 'synset': 'meadow_rue.n.01', 'name': 'meadow_rue'}, {'id': 17911, 'synset': 'false_bugbane.n.01', 'name': 'false_bugbane'}, {'id': 
17912, 'synset': 'globeflower.n.01', 'name': 'globeflower'}, {'id': 17913, 'synset': "winter's_bark.n.02", 'name': "winter's_bark"}, {'id': 17914, 'synset': 'pepper_shrub.n.01', 'name': 'pepper_shrub'}, {'id': 17915, 'synset': 'sweet_gale.n.01', 'name': 'sweet_gale'}, {'id': 17916, 'synset': 'wax_myrtle.n.01', 'name': 'wax_myrtle'}, {'id': 17917, 'synset': 'bay_myrtle.n.01', 'name': 'bay_myrtle'}, {'id': 17918, 'synset': 'bayberry.n.02', 'name': 'bayberry'}, {'id': 17919, 'synset': 'sweet_fern.n.02', 'name': 'sweet_fern'}, {'id': 17920, 'synset': 'corkwood.n.01', 'name': 'corkwood'}, {'id': 17921, 'synset': 'jointed_rush.n.01', 'name': 'jointed_rush'}, {'id': 17922, 'synset': 'toad_rush.n.01', 'name': 'toad_rush'}, {'id': 17923, 'synset': 'slender_rush.n.01', 'name': 'slender_rush'}, {'id': 17924, 'synset': 'zebrawood.n.02', 'name': 'zebrawood'}, {'id': 17925, 'synset': 'connarus_guianensis.n.01', 'name': 'Connarus_guianensis'}, {'id': 17926, 'synset': 'legume.n.01', 'name': 'legume'}, {'id': 17927, 'synset': 'peanut.n.01', 'name': 'peanut'}, {'id': 17928, 'synset': 'granadilla_tree.n.01', 'name': 'granadilla_tree'}, {'id': 17929, 'synset': 'arariba.n.01', 'name': 'arariba'}, {'id': 17930, 'synset': 'tonka_bean.n.01', 'name': 'tonka_bean'}, {'id': 17931, 'synset': 'courbaril.n.01', 'name': 'courbaril'}, {'id': 17932, 'synset': 'melilotus.n.01', 'name': 'melilotus'}, {'id': 17933, 'synset': 'darling_pea.n.01', 'name': 'darling_pea'}, {'id': 17934, 'synset': 'smooth_darling_pea.n.01', 'name': 'smooth_darling_pea'}, {'id': 17935, 'synset': 'clover.n.01', 'name': 'clover'}, {'id': 17936, 'synset': 'alpine_clover.n.01', 'name': 'alpine_clover'}, {'id': 17937, 'synset': 'hop_clover.n.02', 'name': 'hop_clover'}, {'id': 17938, 'synset': 'crimson_clover.n.01', 'name': 'crimson_clover'}, {'id': 17939, 'synset': 'red_clover.n.01', 'name': 'red_clover'}, {'id': 17940, 'synset': 'buffalo_clover.n.02', 'name': 'buffalo_clover'}, {'id': 17941, 'synset': 'white_clover.n.01', 'name': 'white_clover'}, {'id': 17942, 'synset': 'mimosa.n.02', 'name': 'mimosa'}, {'id': 17943, 'synset': 'acacia.n.01', 'name': 'acacia'}, {'id': 17944, 'synset': 'shittah.n.01', 'name': 'shittah'}, {'id': 17945, 'synset': 'wattle.n.03', 'name': 'wattle'}, {'id': 17946, 'synset': 'black_wattle.n.01', 'name': 'black_wattle'}, {'id': 17947, 'synset': 'gidgee.n.01', 'name': 'gidgee'}, {'id': 17948, 'synset': 'catechu.n.02', 'name': 'catechu'}, {'id': 17949, 'synset': 'silver_wattle.n.01', 'name': 'silver_wattle'}, {'id': 17950, 'synset': 'huisache.n.01', 'name': 'huisache'}, {'id': 17951, 'synset': 'lightwood.n.01', 'name': 'lightwood'}, {'id': 17952, 'synset': 'golden_wattle.n.01', 'name': 'golden_wattle'}, {'id': 17953, 'synset': 'fever_tree.n.04', 'name': 'fever_tree'}, {'id': 17954, 'synset': 'coralwood.n.01', 'name': 'coralwood'}, {'id': 17955, 'synset': 'albizzia.n.01', 'name': 'albizzia'}, {'id': 17956, 'synset': 'silk_tree.n.01', 'name': 'silk_tree'}, {'id': 17957, 'synset': 'siris.n.01', 'name': 'siris'}, {'id': 17958, 'synset': 'rain_tree.n.01', 'name': 'rain_tree'}, {'id': 17959, 'synset': 'calliandra.n.01', 'name': 'calliandra'}, {'id': 17960, 'synset': 'conacaste.n.01', 'name': 'conacaste'}, {'id': 17961, 'synset': 'inga.n.01', 'name': 'inga'}, {'id': 17962, 'synset': 'ice-cream_bean.n.01', 'name': 'ice-cream_bean'}, {'id': 17963, 'synset': 'guama.n.01', 'name': 'guama'}, {'id': 17964, 'synset': 'lead_tree.n.01', 'name': 'lead_tree'}, {'id': 17965, 'synset': 'wild_tamarind.n.02', 'name': 'wild_tamarind'}, {'id': 17966, 
'synset': 'sabicu.n.02', 'name': 'sabicu'}, {'id': 17967, 'synset': 'nitta_tree.n.01', 'name': 'nitta_tree'}, {'id': 17968, 'synset': 'parkia_javanica.n.01', 'name': 'Parkia_javanica'}, {'id': 17969, 'synset': 'manila_tamarind.n.01', 'name': 'manila_tamarind'}, {'id': 17970, 'synset': "cat's-claw.n.01", 'name': "cat's-claw"}, {'id': 17971, 'synset': 'honey_mesquite.n.01', 'name': 'honey_mesquite'}, {'id': 17972, 'synset': 'algarroba.n.03', 'name': 'algarroba'}, {'id': 17973, 'synset': 'screw_bean.n.02', 'name': 'screw_bean'}, {'id': 17974, 'synset': 'screw_bean.n.01', 'name': 'screw_bean'}, {'id': 17975, 'synset': 'dogbane.n.01', 'name': 'dogbane'}, {'id': 17976, 'synset': 'indian_hemp.n.03', 'name': 'Indian_hemp'}, {'id': 17977, 'synset': "bushman's_poison.n.01", 'name': "bushman's_poison"}, {'id': 17978, 'synset': 'impala_lily.n.01', 'name': 'impala_lily'}, {'id': 17979, 'synset': 'allamanda.n.01', 'name': 'allamanda'}, {'id': 17980, 'synset': 'common_allamanda.n.01', 'name': 'common_allamanda'}, {'id': 17981, 'synset': 'dita.n.01', 'name': 'dita'}, {'id': 17982, 'synset': 'nepal_trumpet_flower.n.01', 'name': 'Nepal_trumpet_flower'}, {'id': 17983, 'synset': 'carissa.n.01', 'name': 'carissa'}, {'id': 17984, 'synset': 'hedge_thorn.n.01', 'name': 'hedge_thorn'}, {'id': 17985, 'synset': 'natal_plum.n.01', 'name': 'natal_plum'}, {'id': 17986, 'synset': 'periwinkle.n.02', 'name': 'periwinkle'}, {'id': 17987, 'synset': 'ivory_tree.n.01', 'name': 'ivory_tree'}, {'id': 17988, 'synset': 'white_dipladenia.n.01', 'name': 'white_dipladenia'}, {'id': 17989, 'synset': 'chilean_jasmine.n.01', 'name': 'Chilean_jasmine'}, {'id': 17990, 'synset': 'oleander.n.01', 'name': 'oleander'}, {'id': 17991, 'synset': 'frangipani.n.01', 'name': 'frangipani'}, {'id': 17992, 'synset': 'west_indian_jasmine.n.01', 'name': 'West_Indian_jasmine'}, {'id': 17993, 'synset': 'rauwolfia.n.02', 'name': 'rauwolfia'}, {'id': 17994, 'synset': 'snakewood.n.01', 'name': 'snakewood'}, {'id': 17995, 'synset': 'strophanthus_kombe.n.01', 'name': 'Strophanthus_kombe'}, {'id': 17996, 'synset': 'yellow_oleander.n.01', 'name': 'yellow_oleander'}, {'id': 17997, 'synset': 'myrtle.n.01', 'name': 'myrtle'}, {'id': 17998, 'synset': 'large_periwinkle.n.01', 'name': 'large_periwinkle'}, {'id': 17999, 'synset': 'arum.n.02', 'name': 'arum'}, {'id': 18000, 'synset': 'cuckoopint.n.01', 'name': 'cuckoopint'}, {'id': 18001, 'synset': 'black_calla.n.01', 'name': 'black_calla'}, {'id': 18002, 'synset': 'calamus.n.02', 'name': 'calamus'}, {'id': 18003, 'synset': 'alocasia.n.01', 'name': 'alocasia'}, {'id': 18004, 'synset': 'giant_taro.n.01', 'name': 'giant_taro'}, {'id': 18005, 'synset': 'amorphophallus.n.01', 'name': 'amorphophallus'}, {'id': 18006, 'synset': 'pungapung.n.01', 'name': 'pungapung'}, {'id': 18007, 'synset': "devil's_tongue.n.01", 'name': "devil's_tongue"}, {'id': 18008, 'synset': 'anthurium.n.01', 'name': 'anthurium'}, {'id': 18009, 'synset': 'flamingo_flower.n.01', 'name': 'flamingo_flower'}, {'id': 18010, 'synset': 'jack-in-the-pulpit.n.01', 'name': 'jack-in-the-pulpit'}, {'id': 18011, 'synset': "friar's-cowl.n.01", 'name': "friar's-cowl"}, {'id': 18012, 'synset': 'caladium.n.01', 'name': 'caladium'}, {'id': 18013, 'synset': 'caladium_bicolor.n.01', 'name': 'Caladium_bicolor'}, {'id': 18014, 'synset': 'wild_calla.n.01', 'name': 'wild_calla'}, {'id': 18015, 'synset': 'taro.n.02', 'name': 'taro'}, {'id': 18016, 'synset': 'taro.n.01', 'name': 'taro'}, {'id': 18017, 'synset': 'cryptocoryne.n.01', 'name': 'cryptocoryne'}, {'id': 18018, 
'synset': 'dracontium.n.01', 'name': 'dracontium'}, {'id': 18019, 'synset': 'golden_pothos.n.01', 'name': 'golden_pothos'}, {'id': 18020, 'synset': 'skunk_cabbage.n.02', 'name': 'skunk_cabbage'}, {'id': 18021, 'synset': 'monstera.n.01', 'name': 'monstera'}, {'id': 18022, 'synset': 'ceriman.n.01', 'name': 'ceriman'}, {'id': 18023, 'synset': 'nephthytis.n.01', 'name': 'nephthytis'}, {'id': 18024, 'synset': 'nephthytis_afzelii.n.01', 'name': 'Nephthytis_afzelii'}, {'id': 18025, 'synset': 'arrow_arum.n.01', 'name': 'arrow_arum'}, {'id': 18026, 'synset': 'green_arrow_arum.n.01', 'name': 'green_arrow_arum'}, {'id': 18027, 'synset': 'philodendron.n.01', 'name': 'philodendron'}, {'id': 18028, 'synset': 'pistia.n.01', 'name': 'pistia'}, {'id': 18029, 'synset': 'pothos.n.01', 'name': 'pothos'}, {'id': 18030, 'synset': 'spathiphyllum.n.01', 'name': 'spathiphyllum'}, {'id': 18031, 'synset': 'skunk_cabbage.n.01', 'name': 'skunk_cabbage'}, {'id': 18032, 'synset': 'yautia.n.01', 'name': 'yautia'}, {'id': 18033, 'synset': 'calla_lily.n.01', 'name': 'calla_lily'}, {'id': 18034, 'synset': 'pink_calla.n.01', 'name': 'pink_calla'}, {'id': 18035, 'synset': 'golden_calla.n.01', 'name': 'golden_calla'}, {'id': 18036, 'synset': 'duckweed.n.01', 'name': 'duckweed'}, {'id': 18037, 'synset': 'common_duckweed.n.01', 'name': 'common_duckweed'}, {'id': 18038, 'synset': 'star-duckweed.n.01', 'name': 'star-duckweed'}, {'id': 18039, 'synset': 'great_duckweed.n.01', 'name': 'great_duckweed'}, {'id': 18040, 'synset': 'watermeal.n.01', 'name': 'watermeal'}, {'id': 18041, 'synset': 'common_wolffia.n.01', 'name': 'common_wolffia'}, {'id': 18042, 'synset': 'aralia.n.01', 'name': 'aralia'}, {'id': 18043, 'synset': 'american_angelica_tree.n.01', 'name': 'American_angelica_tree'}, {'id': 18044, 'synset': 'american_spikenard.n.01', 'name': 'American_spikenard'}, {'id': 18045, 'synset': 'bristly_sarsaparilla.n.01', 'name': 'bristly_sarsaparilla'}, {'id': 18046, 'synset': 'japanese_angelica_tree.n.01', 'name': 'Japanese_angelica_tree'}, {'id': 18047, 'synset': 'chinese_angelica.n.01', 'name': 'Chinese_angelica'}, {'id': 18048, 'synset': 'ivy.n.01', 'name': 'ivy'}, {'id': 18049, 'synset': 'puka.n.02', 'name': 'puka'}, {'id': 18050, 'synset': 'ginseng.n.02', 'name': 'ginseng'}, {'id': 18051, 'synset': 'ginseng.n.01', 'name': 'ginseng'}, {'id': 18052, 'synset': 'umbrella_tree.n.01', 'name': 'umbrella_tree'}, {'id': 18053, 'synset': 'birthwort.n.01', 'name': 'birthwort'}, {'id': 18054, 'synset': "dutchman's-pipe.n.01", 'name': "Dutchman's-pipe"}, {'id': 18055, 'synset': 'virginia_snakeroot.n.01', 'name': 'Virginia_snakeroot'}, {'id': 18056, 'synset': 'canada_ginger.n.01', 'name': 'Canada_ginger'}, {'id': 18057, 'synset': 'heartleaf.n.02', 'name': 'heartleaf'}, {'id': 18058, 'synset': 'heartleaf.n.01', 'name': 'heartleaf'}, {'id': 18059, 'synset': 'asarabacca.n.01', 'name': 'asarabacca'}, {'id': 18060, 'synset': 'caryophyllaceous_plant.n.01', 'name': 'caryophyllaceous_plant'}, {'id': 18061, 'synset': 'corn_cockle.n.01', 'name': 'corn_cockle'}, {'id': 18062, 'synset': 'sandwort.n.03', 'name': 'sandwort'}, {'id': 18063, 'synset': 'mountain_sandwort.n.01', 'name': 'mountain_sandwort'}, {'id': 18064, 'synset': 'pine-barren_sandwort.n.01', 'name': 'pine-barren_sandwort'}, {'id': 18065, 'synset': 'seabeach_sandwort.n.01', 'name': 'seabeach_sandwort'}, {'id': 18066, 'synset': 'rock_sandwort.n.01', 'name': 'rock_sandwort'}, {'id': 18067, 'synset': 'thyme-leaved_sandwort.n.01', 'name': 'thyme-leaved_sandwort'}, {'id': 18068, 'synset': 
'mouse-ear_chickweed.n.01', 'name': 'mouse-ear_chickweed'}, {'id': 18069, 'synset': 'snow-in-summer.n.02', 'name': 'snow-in-summer'}, {'id': 18070, 'synset': 'alpine_mouse-ear.n.01', 'name': 'Alpine_mouse-ear'}, {'id': 18071, 'synset': 'pink.n.02', 'name': 'pink'}, {'id': 18072, 'synset': 'sweet_william.n.01', 'name': 'sweet_William'}, {'id': 18073, 'synset': 'china_pink.n.01', 'name': 'china_pink'}, {'id': 18074, 'synset': 'japanese_pink.n.01', 'name': 'Japanese_pink'}, {'id': 18075, 'synset': 'maiden_pink.n.01', 'name': 'maiden_pink'}, {'id': 18076, 'synset': 'cheddar_pink.n.01', 'name': 'cheddar_pink'}, {'id': 18077, 'synset': 'button_pink.n.01', 'name': 'button_pink'}, {'id': 18078, 'synset': 'cottage_pink.n.01', 'name': 'cottage_pink'}, {'id': 18079, 'synset': 'fringed_pink.n.02', 'name': 'fringed_pink'}, {'id': 18080, 'synset': 'drypis.n.01', 'name': 'drypis'}, {'id': 18081, 'synset': "baby's_breath.n.01", 'name': "baby's_breath"}, {'id': 18082, 'synset': 'coral_necklace.n.01', 'name': 'coral_necklace'}, {'id': 18083, 'synset': 'lychnis.n.01', 'name': 'lychnis'}, {'id': 18084, 'synset': 'ragged_robin.n.01', 'name': 'ragged_robin'}, {'id': 18085, 'synset': 'scarlet_lychnis.n.01', 'name': 'scarlet_lychnis'}, {'id': 18086, 'synset': 'mullein_pink.n.01', 'name': 'mullein_pink'}, {'id': 18087, 'synset': 'sandwort.n.02', 'name': 'sandwort'}, {'id': 18088, 'synset': 'sandwort.n.01', 'name': 'sandwort'}, {'id': 18089, 'synset': 'soapwort.n.01', 'name': 'soapwort'}, {'id': 18090, 'synset': 'knawel.n.01', 'name': 'knawel'}, {'id': 18091, 'synset': 'silene.n.01', 'name': 'silene'}, {'id': 18092, 'synset': 'moss_campion.n.01', 'name': 'moss_campion'}, {'id': 18093, 'synset': 'wild_pink.n.02', 'name': 'wild_pink'}, {'id': 18094, 'synset': 'red_campion.n.01', 'name': 'red_campion'}, {'id': 18095, 'synset': 'white_campion.n.01', 'name': 'white_campion'}, {'id': 18096, 'synset': 'fire_pink.n.01', 'name': 'fire_pink'}, {'id': 18097, 'synset': 'bladder_campion.n.01', 'name': 'bladder_campion'}, {'id': 18098, 'synset': 'corn_spurry.n.01', 'name': 'corn_spurry'}, {'id': 18099, 'synset': 'sand_spurry.n.01', 'name': 'sand_spurry'}, {'id': 18100, 'synset': 'chickweed.n.01', 'name': 'chickweed'}, {'id': 18101, 'synset': 'common_chickweed.n.01', 'name': 'common_chickweed'}, {'id': 18102, 'synset': 'cowherb.n.01', 'name': 'cowherb'}, {'id': 18103, 'synset': 'hottentot_fig.n.01', 'name': 'Hottentot_fig'}, {'id': 18104, 'synset': 'livingstone_daisy.n.01', 'name': 'livingstone_daisy'}, {'id': 18105, 'synset': 'fig_marigold.n.01', 'name': 'fig_marigold'}, {'id': 18106, 'synset': 'ice_plant.n.01', 'name': 'ice_plant'}, {'id': 18107, 'synset': 'new_zealand_spinach.n.01', 'name': 'New_Zealand_spinach'}, {'id': 18108, 'synset': 'amaranth.n.02', 'name': 'amaranth'}, {'id': 18109, 'synset': 'amaranth.n.01', 'name': 'amaranth'}, {'id': 18110, 'synset': 'tumbleweed.n.04', 'name': 'tumbleweed'}, {'id': 18111, 'synset': "prince's-feather.n.02", 'name': "prince's-feather"}, {'id': 18112, 'synset': 'pigweed.n.02', 'name': 'pigweed'}, {'id': 18113, 'synset': 'thorny_amaranth.n.01', 'name': 'thorny_amaranth'}, {'id': 18114, 'synset': 'alligator_weed.n.01', 'name': 'alligator_weed'}, {'id': 18115, 'synset': 'cockscomb.n.01', 'name': 'cockscomb'}, {'id': 18116, 'synset': 'cottonweed.n.02', 'name': 'cottonweed'}, {'id': 18117, 'synset': 'globe_amaranth.n.01', 'name': 'globe_amaranth'}, {'id': 18118, 'synset': 'bloodleaf.n.01', 'name': 'bloodleaf'}, {'id': 18119, 'synset': 'saltwort.n.02', 'name': 'saltwort'}, {'id': 18120, 
'synset': "lamb's-quarters.n.01", 'name': "lamb's-quarters"}, {'id': 18121, 'synset': 'good-king-henry.n.01', 'name': 'good-king-henry'}, {'id': 18122, 'synset': 'jerusalem_oak.n.01', 'name': 'Jerusalem_oak'}, {'id': 18123, 'synset': 'oak-leaved_goosefoot.n.01', 'name': 'oak-leaved_goosefoot'}, {'id': 18124, 'synset': 'sowbane.n.01', 'name': 'sowbane'}, {'id': 18125, 'synset': 'nettle-leaved_goosefoot.n.01', 'name': 'nettle-leaved_goosefoot'}, {'id': 18126, 'synset': 'red_goosefoot.n.01', 'name': 'red_goosefoot'}, {'id': 18127, 'synset': 'stinking_goosefoot.n.01', 'name': 'stinking_goosefoot'}, {'id': 18128, 'synset': 'orach.n.01', 'name': 'orach'}, {'id': 18129, 'synset': 'saltbush.n.01', 'name': 'saltbush'}, {'id': 18130, 'synset': 'garden_orache.n.01', 'name': 'garden_orache'}, {'id': 18131, 'synset': 'desert_holly.n.01', 'name': 'desert_holly'}, {'id': 18132, 'synset': 'quail_bush.n.01', 'name': 'quail_bush'}, {'id': 18133, 'synset': 'beet.n.01', 'name': 'beet'}, {'id': 18134, 'synset': 'beetroot.n.01', 'name': 'beetroot'}, {'id': 18135, 'synset': 'chard.n.01', 'name': 'chard'}, {'id': 18136, 'synset': 'mangel-wurzel.n.01', 'name': 'mangel-wurzel'}, {'id': 18137, 'synset': 'winged_pigweed.n.01', 'name': 'winged_pigweed'}, {'id': 18138, 'synset': 'halogeton.n.01', 'name': 'halogeton'}, {'id': 18139, 'synset': 'glasswort.n.02', 'name': 'glasswort'}, {'id': 18140, 'synset': 'saltwort.n.01', 'name': 'saltwort'}, {'id': 18141, 'synset': 'russian_thistle.n.01', 'name': 'Russian_thistle'}, {'id': 18142, 'synset': 'greasewood.n.01', 'name': 'greasewood'}, {'id': 18143, 'synset': 'scarlet_musk_flower.n.01', 'name': 'scarlet_musk_flower'}, {'id': 18144, 'synset': 'sand_verbena.n.01', 'name': 'sand_verbena'}, {'id': 18145, 'synset': 'sweet_sand_verbena.n.01', 'name': 'sweet_sand_verbena'}, {'id': 18146, 'synset': 'yellow_sand_verbena.n.01', 'name': 'yellow_sand_verbena'}, {'id': 18147, 'synset': 'beach_pancake.n.01', 'name': 'beach_pancake'}, {'id': 18148, 'synset': 'beach_sand_verbena.n.01', 'name': 'beach_sand_verbena'}, {'id': 18149, 'synset': 'desert_sand_verbena.n.01', 'name': 'desert_sand_verbena'}, {'id': 18150, 'synset': "trailing_four_o'clock.n.01", 'name': "trailing_four_o'clock"}, {'id': 18151, 'synset': 'bougainvillea.n.01', 'name': 'bougainvillea'}, {'id': 18152, 'synset': 'umbrellawort.n.01', 'name': 'umbrellawort'}, {'id': 18153, 'synset': "four_o'clock.n.01", 'name': "four_o'clock"}, {'id': 18154, 'synset': "common_four-o'clock.n.01", 'name': "common_four-o'clock"}, {'id': 18155, 'synset': "california_four_o'clock.n.01", 'name': "California_four_o'clock"}, {'id': 18156, 'synset': "sweet_four_o'clock.n.01", 'name': "sweet_four_o'clock"}, {'id': 18157, 'synset': "desert_four_o'clock.n.01", 'name': "desert_four_o'clock"}, {'id': 18158, 'synset': "mountain_four_o'clock.n.01", 'name': "mountain_four_o'clock"}, {'id': 18159, 'synset': 'cockspur.n.02', 'name': 'cockspur'}, {'id': 18160, 'synset': 'rattail_cactus.n.01', 'name': 'rattail_cactus'}, {'id': 18161, 'synset': 'saguaro.n.01', 'name': 'saguaro'}, {'id': 18162, 'synset': 'night-blooming_cereus.n.03', 'name': 'night-blooming_cereus'}, {'id': 18163, 'synset': 'echinocactus.n.01', 'name': 'echinocactus'}, {'id': 18164, 'synset': 'hedgehog_cactus.n.01', 'name': 'hedgehog_cactus'}, {'id': 18165, 'synset': 'golden_barrel_cactus.n.01', 'name': 'golden_barrel_cactus'}, {'id': 18166, 'synset': 'hedgehog_cereus.n.01', 'name': 'hedgehog_cereus'}, {'id': 18167, 'synset': 'rainbow_cactus.n.01', 'name': 'rainbow_cactus'}, {'id': 18168, 
'synset': 'epiphyllum.n.01', 'name': 'epiphyllum'}, {'id': 18169, 'synset': 'barrel_cactus.n.01', 'name': 'barrel_cactus'}, {'id': 18170, 'synset': 'night-blooming_cereus.n.02', 'name': 'night-blooming_cereus'}, {'id': 18171, 'synset': 'chichipe.n.01', 'name': 'chichipe'}, {'id': 18172, 'synset': 'mescal.n.01', 'name': 'mescal'}, {'id': 18173, 'synset': 'mescal_button.n.01', 'name': 'mescal_button'}, {'id': 18174, 'synset': 'mammillaria.n.01', 'name': 'mammillaria'}, {'id': 18175, 'synset': 'feather_ball.n.01', 'name': 'feather_ball'}, {'id': 18176, 'synset': 'garambulla.n.01', 'name': 'garambulla'}, {'id': 18177, 'synset': "knowlton's_cactus.n.01", 'name': "Knowlton's_cactus"}, {'id': 18178, 'synset': 'nopal.n.02', 'name': 'nopal'}, {'id': 18179, 'synset': 'prickly_pear.n.01', 'name': 'prickly_pear'}, {'id': 18180, 'synset': 'cholla.n.01', 'name': 'cholla'}, {'id': 18181, 'synset': 'nopal.n.01', 'name': 'nopal'}, {'id': 18182, 'synset': 'tuna.n.01', 'name': 'tuna'}, {'id': 18183, 'synset': 'barbados_gooseberry.n.01', 'name': 'Barbados_gooseberry'}, {'id': 18184, 'synset': 'mistletoe_cactus.n.01', 'name': 'mistletoe_cactus'}, {'id': 18185, 'synset': 'christmas_cactus.n.01', 'name': 'Christmas_cactus'}, {'id': 18186, 'synset': 'night-blooming_cereus.n.01', 'name': 'night-blooming_cereus'}, {'id': 18187, 'synset': 'crab_cactus.n.01', 'name': 'crab_cactus'}, {'id': 18188, 'synset': 'pokeweed.n.01', 'name': 'pokeweed'}, {'id': 18189, 'synset': 'indian_poke.n.02', 'name': 'Indian_poke'}, {'id': 18190, 'synset': 'poke.n.01', 'name': 'poke'}, {'id': 18191, 'synset': 'ombu.n.01', 'name': 'ombu'}, {'id': 18192, 'synset': 'bloodberry.n.01', 'name': 'bloodberry'}, {'id': 18193, 'synset': 'portulaca.n.01', 'name': 'portulaca'}, {'id': 18194, 'synset': 'rose_moss.n.01', 'name': 'rose_moss'}, {'id': 18195, 'synset': 'common_purslane.n.01', 'name': 'common_purslane'}, {'id': 18196, 'synset': 'rock_purslane.n.01', 'name': 'rock_purslane'}, {'id': 18197, 'synset': 'red_maids.n.01', 'name': 'red_maids'}, {'id': 18198, 'synset': 'carolina_spring_beauty.n.01', 'name': 'Carolina_spring_beauty'}, {'id': 18199, 'synset': 'spring_beauty.n.01', 'name': 'spring_beauty'}, {'id': 18200, 'synset': 'virginia_spring_beauty.n.01', 'name': 'Virginia_spring_beauty'}, {'id': 18201, 'synset': 'siskiyou_lewisia.n.01', 'name': 'siskiyou_lewisia'}, {'id': 18202, 'synset': 'bitterroot.n.01', 'name': 'bitterroot'}, {'id': 18203, 'synset': 'broad-leaved_montia.n.01', 'name': 'broad-leaved_montia'}, {'id': 18204, 'synset': 'blinks.n.01', 'name': 'blinks'}, {'id': 18205, 'synset': 'toad_lily.n.01', 'name': 'toad_lily'}, {'id': 18206, 'synset': 'winter_purslane.n.01', 'name': 'winter_purslane'}, {'id': 18207, 'synset': 'flame_flower.n.02', 'name': 'flame_flower'}, {'id': 18208, 'synset': 'pigmy_talinum.n.01', 'name': 'pigmy_talinum'}, {'id': 18209, 'synset': 'jewels-of-opar.n.01', 'name': 'jewels-of-opar'}, {'id': 18210, 'synset': 'caper.n.01', 'name': 'caper'}, {'id': 18211, 'synset': 'native_pomegranate.n.01', 'name': 'native_pomegranate'}, {'id': 18212, 'synset': 'caper_tree.n.02', 'name': 'caper_tree'}, {'id': 18213, 'synset': 'caper_tree.n.01', 'name': 'caper_tree'}, {'id': 18214, 'synset': 'common_caper.n.01', 'name': 'common_caper'}, {'id': 18215, 'synset': 'spiderflower.n.01', 'name': 'spiderflower'}, {'id': 18216, 'synset': 'rocky_mountain_bee_plant.n.01', 'name': 'Rocky_Mountain_bee_plant'}, {'id': 18217, 'synset': 'clammyweed.n.01', 'name': 'clammyweed'}, {'id': 18218, 'synset': 'crucifer.n.01', 'name': 'crucifer'}, {'id': 
18219, 'synset': 'cress.n.01', 'name': 'cress'}, {'id': 18220, 'synset': 'watercress.n.01', 'name': 'watercress'}, {'id': 18221, 'synset': 'stonecress.n.01', 'name': 'stonecress'}, {'id': 18222, 'synset': 'garlic_mustard.n.01', 'name': 'garlic_mustard'}, {'id': 18223, 'synset': 'alyssum.n.01', 'name': 'alyssum'}, {'id': 18224, 'synset': 'rose_of_jericho.n.02', 'name': 'rose_of_Jericho'}, {'id': 18225, 'synset': 'arabidopsis_thaliana.n.01', 'name': 'Arabidopsis_thaliana'}, {'id': 18226, 'synset': 'arabidopsis_lyrata.n.01', 'name': 'Arabidopsis_lyrata'}, {'id': 18227, 'synset': 'rock_cress.n.01', 'name': 'rock_cress'}, {'id': 18228, 'synset': 'sicklepod.n.02', 'name': 'sicklepod'}, {'id': 18229, 'synset': 'tower_mustard.n.01', 'name': 'tower_mustard'}, {'id': 18230, 'synset': 'horseradish.n.01', 'name': 'horseradish'}, {'id': 18231, 'synset': 'winter_cress.n.01', 'name': 'winter_cress'}, {'id': 18232, 'synset': 'yellow_rocket.n.01', 'name': 'yellow_rocket'}, {'id': 18233, 'synset': 'hoary_alison.n.01', 'name': 'hoary_alison'}, {'id': 18234, 'synset': 'buckler_mustard.n.01', 'name': 'buckler_mustard'}, {'id': 18235, 'synset': 'wild_cabbage.n.01', 'name': 'wild_cabbage'}, {'id': 18236, 'synset': 'cabbage.n.03', 'name': 'cabbage'}, {'id': 18237, 'synset': 'head_cabbage.n.01', 'name': 'head_cabbage'}, {'id': 18238, 'synset': 'savoy_cabbage.n.01', 'name': 'savoy_cabbage'}, {'id': 18239, 'synset': 'brussels_sprout.n.01', 'name': 'brussels_sprout'}, {'id': 18240, 'synset': 'cauliflower.n.01', 'name': 'cauliflower'}, {'id': 18241, 'synset': 'collard.n.01', 'name': 'collard'}, {'id': 18242, 'synset': 'kohlrabi.n.01', 'name': 'kohlrabi'}, {'id': 18243, 'synset': 'turnip_plant.n.01', 'name': 'turnip_plant'}, {'id': 18244, 'synset': 'rutabaga.n.02', 'name': 'rutabaga'}, {'id': 18245, 'synset': 'broccoli_raab.n.01', 'name': 'broccoli_raab'}, {'id': 18246, 'synset': 'mustard.n.01', 'name': 'mustard'}, {'id': 18247, 'synset': 'chinese_mustard.n.01', 'name': 'chinese_mustard'}, {'id': 18248, 'synset': 'bok_choy.n.01', 'name': 'bok_choy'}, {'id': 18249, 'synset': 'rape.n.01', 'name': 'rape'}, {'id': 18250, 'synset': 'rapeseed.n.01', 'name': 'rapeseed'}, {'id': 18251, 'synset': "shepherd's_purse.n.01", 'name': "shepherd's_purse"}, {'id': 18252, 'synset': "lady's_smock.n.01", 'name': "lady's_smock"}, {'id': 18253, 'synset': 'coral-root_bittercress.n.01', 'name': 'coral-root_bittercress'}, {'id': 18254, 'synset': 'crinkleroot.n.01', 'name': 'crinkleroot'}, {'id': 18255, 'synset': 'american_watercress.n.01', 'name': 'American_watercress'}, {'id': 18256, 'synset': 'spring_cress.n.01', 'name': 'spring_cress'}, {'id': 18257, 'synset': 'purple_cress.n.01', 'name': 'purple_cress'}, {'id': 18258, 'synset': 'wallflower.n.02', 'name': 'wallflower'}, {'id': 18259, 'synset': 'prairie_rocket.n.02', 'name': 'prairie_rocket'}, {'id': 18260, 'synset': 'scurvy_grass.n.01', 'name': 'scurvy_grass'}, {'id': 18261, 'synset': 'sea_kale.n.01', 'name': 'sea_kale'}, {'id': 18262, 'synset': 'tansy_mustard.n.01', 'name': 'tansy_mustard'}, {'id': 18263, 'synset': 'draba.n.01', 'name': 'draba'}, {'id': 18264, 'synset': 'wallflower.n.01', 'name': 'wallflower'}, {'id': 18265, 'synset': 'prairie_rocket.n.01', 'name': 'prairie_rocket'}, {'id': 18266, 'synset': 'siberian_wall_flower.n.01', 'name': 'Siberian_wall_flower'}, {'id': 18267, 'synset': 'western_wall_flower.n.01', 'name': 'western_wall_flower'}, {'id': 18268, 'synset': 'wormseed_mustard.n.01', 'name': 'wormseed_mustard'}, {'id': 18269, 'synset': 'heliophila.n.01', 'name': 
'heliophila'}, {'id': 18270, 'synset': 'damask_violet.n.01', 'name': 'damask_violet'}, {'id': 18271, 'synset': 'tansy-leaved_rocket.n.01', 'name': 'tansy-leaved_rocket'}, {'id': 18272, 'synset': 'candytuft.n.01', 'name': 'candytuft'}, {'id': 18273, 'synset': 'woad.n.02', 'name': 'woad'}, {'id': 18274, 'synset': "dyer's_woad.n.01", 'name': "dyer's_woad"}, {'id': 18275, 'synset': 'bladderpod.n.04', 'name': 'bladderpod'}, {'id': 18276, 'synset': 'sweet_alyssum.n.01', 'name': 'sweet_alyssum'}, {'id': 18277, 'synset': 'malcolm_stock.n.01', 'name': 'Malcolm_stock'}, {'id': 18278, 'synset': 'virginian_stock.n.01', 'name': 'Virginian_stock'}, {'id': 18279, 'synset': 'stock.n.12', 'name': 'stock'}, {'id': 18280, 'synset': 'brompton_stock.n.01', 'name': 'brompton_stock'}, {'id': 18281, 'synset': 'bladderpod.n.03', 'name': 'bladderpod'}, {'id': 18282, 'synset': 'chamois_cress.n.01', 'name': 'chamois_cress'}, {'id': 18283, 'synset': 'radish_plant.n.01', 'name': 'radish_plant'}, {'id': 18284, 'synset': 'jointed_charlock.n.01', 'name': 'jointed_charlock'}, {'id': 18285, 'synset': 'radish.n.04', 'name': 'radish'}, {'id': 18286, 'synset': 'radish.n.02', 'name': 'radish'}, {'id': 18287, 'synset': 'marsh_cress.n.01', 'name': 'marsh_cress'}, {'id': 18288, 'synset': 'great_yellowcress.n.01', 'name': 'great_yellowcress'}, {'id': 18289, 'synset': 'schizopetalon.n.01', 'name': 'schizopetalon'}, {'id': 18290, 'synset': 'field_mustard.n.01', 'name': 'field_mustard'}, {'id': 18291, 'synset': 'hedge_mustard.n.01', 'name': 'hedge_mustard'}, {'id': 18292, 'synset': 'desert_plume.n.01', 'name': 'desert_plume'}, {'id': 18293, 'synset': 'pennycress.n.01', 'name': 'pennycress'}, {'id': 18294, 'synset': 'field_pennycress.n.01', 'name': 'field_pennycress'}, {'id': 18295, 'synset': 'fringepod.n.01', 'name': 'fringepod'}, {'id': 18296, 'synset': 'bladderpod.n.02', 'name': 'bladderpod'}, {'id': 18297, 'synset': 'wasabi.n.01', 'name': 'wasabi'}, {'id': 18298, 'synset': 'poppy.n.01', 'name': 'poppy'}, {'id': 18299, 'synset': 'iceland_poppy.n.02', 'name': 'Iceland_poppy'}, {'id': 18300, 'synset': 'western_poppy.n.01', 'name': 'western_poppy'}, {'id': 18301, 'synset': 'prickly_poppy.n.02', 'name': 'prickly_poppy'}, {'id': 18302, 'synset': 'iceland_poppy.n.01', 'name': 'Iceland_poppy'}, {'id': 18303, 'synset': 'oriental_poppy.n.01', 'name': 'oriental_poppy'}, {'id': 18304, 'synset': 'corn_poppy.n.01', 'name': 'corn_poppy'}, {'id': 18305, 'synset': 'opium_poppy.n.01', 'name': 'opium_poppy'}, {'id': 18306, 'synset': 'prickly_poppy.n.01', 'name': 'prickly_poppy'}, {'id': 18307, 'synset': 'mexican_poppy.n.01', 'name': 'Mexican_poppy'}, {'id': 18308, 'synset': 'bocconia.n.02', 'name': 'bocconia'}, {'id': 18309, 'synset': 'celandine.n.02', 'name': 'celandine'}, {'id': 18310, 'synset': 'corydalis.n.01', 'name': 'corydalis'}, {'id': 18311, 'synset': 'climbing_corydalis.n.01', 'name': 'climbing_corydalis'}, {'id': 18312, 'synset': 'california_poppy.n.01', 'name': 'California_poppy'}, {'id': 18313, 'synset': 'horn_poppy.n.01', 'name': 'horn_poppy'}, {'id': 18314, 'synset': 'golden_cup.n.01', 'name': 'golden_cup'}, {'id': 18315, 'synset': 'plume_poppy.n.01', 'name': 'plume_poppy'}, {'id': 18316, 'synset': 'blue_poppy.n.01', 'name': 'blue_poppy'}, {'id': 18317, 'synset': 'welsh_poppy.n.01', 'name': 'Welsh_poppy'}, {'id': 18318, 'synset': 'creamcups.n.01', 'name': 'creamcups'}, {'id': 18319, 'synset': 'matilija_poppy.n.01', 'name': 'matilija_poppy'}, {'id': 18320, 'synset': 'wind_poppy.n.01', 'name': 'wind_poppy'}, {'id': 18321, 'synset': 
'celandine_poppy.n.01', 'name': 'celandine_poppy'}, {'id': 18322, 'synset': 'climbing_fumitory.n.01', 'name': 'climbing_fumitory'}, {'id': 18323, 'synset': 'bleeding_heart.n.01', 'name': 'bleeding_heart'}, {'id': 18324, 'synset': "dutchman's_breeches.n.01", 'name': "Dutchman's_breeches"}, {'id': 18325, 'synset': 'squirrel_corn.n.01', 'name': 'squirrel_corn'}, {'id': 18326, 'synset': 'composite.n.02', 'name': 'composite'}, {'id': 18327, 'synset': 'compass_plant.n.02', 'name': 'compass_plant'}, {'id': 18328, 'synset': 'everlasting.n.01', 'name': 'everlasting'}, {'id': 18329, 'synset': 'achillea.n.01', 'name': 'achillea'}, {'id': 18330, 'synset': 'yarrow.n.01', 'name': 'yarrow'}, {'id': 18331, 'synset': 'pink-and-white_everlasting.n.01', 'name': 'pink-and-white_everlasting'}, {'id': 18332, 'synset': 'white_snakeroot.n.01', 'name': 'white_snakeroot'}, {'id': 18333, 'synset': 'ageratum.n.02', 'name': 'ageratum'}, {'id': 18334, 'synset': 'common_ageratum.n.01', 'name': 'common_ageratum'}, {'id': 18335, 'synset': 'sweet_sultan.n.03', 'name': 'sweet_sultan'}, {'id': 18336, 'synset': 'ragweed.n.02', 'name': 'ragweed'}, {'id': 18337, 'synset': 'common_ragweed.n.01', 'name': 'common_ragweed'}, {'id': 18338, 'synset': 'great_ragweed.n.01', 'name': 'great_ragweed'}, {'id': 18339, 'synset': 'western_ragweed.n.01', 'name': 'western_ragweed'}, {'id': 18340, 'synset': 'ammobium.n.01', 'name': 'ammobium'}, {'id': 18341, 'synset': 'winged_everlasting.n.01', 'name': 'winged_everlasting'}, {'id': 18342, 'synset': 'pellitory.n.02', 'name': 'pellitory'}, {'id': 18343, 'synset': 'pearly_everlasting.n.01', 'name': 'pearly_everlasting'}, {'id': 18344, 'synset': 'andryala.n.01', 'name': 'andryala'}, {'id': 18345, 'synset': 'plantain-leaved_pussytoes.n.01', 'name': 'plantain-leaved_pussytoes'}, {'id': 18346, 'synset': 'field_pussytoes.n.01', 'name': 'field_pussytoes'}, {'id': 18347, 'synset': 'solitary_pussytoes.n.01', 'name': 'solitary_pussytoes'}, {'id': 18348, 'synset': 'mountain_everlasting.n.01', 'name': 'mountain_everlasting'}, {'id': 18349, 'synset': 'mayweed.n.01', 'name': 'mayweed'}, {'id': 18350, 'synset': 'yellow_chamomile.n.01', 'name': 'yellow_chamomile'}, {'id': 18351, 'synset': 'corn_chamomile.n.01', 'name': 'corn_chamomile'}, {'id': 18352, 'synset': 'woolly_daisy.n.01', 'name': 'woolly_daisy'}, {'id': 18353, 'synset': 'burdock.n.01', 'name': 'burdock'}, {'id': 18354, 'synset': 'great_burdock.n.01', 'name': 'great_burdock'}, {'id': 18355, 'synset': 'african_daisy.n.03', 'name': 'African_daisy'}, {'id': 18356, 'synset': 'blue-eyed_african_daisy.n.01', 'name': 'blue-eyed_African_daisy'}, {'id': 18357, 'synset': 'marguerite.n.02', 'name': 'marguerite'}, {'id': 18358, 'synset': 'silversword.n.01', 'name': 'silversword'}, {'id': 18359, 'synset': 'arnica.n.02', 'name': 'arnica'}, {'id': 18360, 'synset': 'heartleaf_arnica.n.01', 'name': 'heartleaf_arnica'}, {'id': 18361, 'synset': 'arnica_montana.n.01', 'name': 'Arnica_montana'}, {'id': 18362, 'synset': 'lamb_succory.n.01', 'name': 'lamb_succory'}, {'id': 18363, 'synset': 'artemisia.n.01', 'name': 'artemisia'}, {'id': 18364, 'synset': 'mugwort.n.01', 'name': 'mugwort'}, {'id': 18365, 'synset': 'sweet_wormwood.n.01', 'name': 'sweet_wormwood'}, {'id': 18366, 'synset': 'field_wormwood.n.01', 'name': 'field_wormwood'}, {'id': 18367, 'synset': 'tarragon.n.01', 'name': 'tarragon'}, {'id': 18368, 'synset': 'sand_sage.n.01', 'name': 'sand_sage'}, {'id': 18369, 'synset': 'wormwood_sage.n.01', 'name': 'wormwood_sage'}, {'id': 18370, 'synset': 'western_mugwort.n.01', 
'name': 'western_mugwort'}, {'id': 18371, 'synset': 'roman_wormwood.n.01', 'name': 'Roman_wormwood'}, {'id': 18372, 'synset': 'bud_brush.n.01', 'name': 'bud_brush'}, {'id': 18373, 'synset': 'common_mugwort.n.01', 'name': 'common_mugwort'}, {'id': 18374, 'synset': 'aster.n.01', 'name': 'aster'}, {'id': 18375, 'synset': 'wood_aster.n.01', 'name': 'wood_aster'}, {'id': 18376, 'synset': 'whorled_aster.n.01', 'name': 'whorled_aster'}, {'id': 18377, 'synset': 'heath_aster.n.02', 'name': 'heath_aster'}, {'id': 18378, 'synset': 'heart-leaved_aster.n.01', 'name': 'heart-leaved_aster'}, {'id': 18379, 'synset': 'white_wood_aster.n.01', 'name': 'white_wood_aster'}, {'id': 18380, 'synset': 'bushy_aster.n.01', 'name': 'bushy_aster'}, {'id': 18381, 'synset': 'heath_aster.n.01', 'name': 'heath_aster'}, {'id': 18382, 'synset': 'white_prairie_aster.n.01', 'name': 'white_prairie_aster'}, {'id': 18383, 'synset': 'stiff_aster.n.01', 'name': 'stiff_aster'}, {'id': 18384, 'synset': 'goldilocks.n.01', 'name': 'goldilocks'}, {'id': 18385, 'synset': 'large-leaved_aster.n.01', 'name': 'large-leaved_aster'}, {'id': 18386, 'synset': 'new_england_aster.n.01', 'name': 'New_England_aster'}, {'id': 18387, 'synset': 'michaelmas_daisy.n.01', 'name': 'Michaelmas_daisy'}, {'id': 18388, 'synset': 'upland_white_aster.n.01', 'name': 'upland_white_aster'}, {'id': 18389, 'synset': "short's_aster.n.01", 'name': "Short's_aster"}, {'id': 18390, 'synset': 'sea_aster.n.01', 'name': 'sea_aster'}, {'id': 18391, 'synset': 'prairie_aster.n.01', 'name': 'prairie_aster'}, {'id': 18392, 'synset': 'annual_salt-marsh_aster.n.01', 'name': 'annual_salt-marsh_aster'}, {'id': 18393, 'synset': 'aromatic_aster.n.01', 'name': 'aromatic_aster'}, {'id': 18394, 'synset': 'arrow_leaved_aster.n.01', 'name': 'arrow_leaved_aster'}, {'id': 18395, 'synset': 'azure_aster.n.01', 'name': 'azure_aster'}, {'id': 18396, 'synset': 'bog_aster.n.01', 'name': 'bog_aster'}, {'id': 18397, 'synset': 'crooked-stemmed_aster.n.01', 'name': 'crooked-stemmed_aster'}, {'id': 18398, 'synset': 'eastern_silvery_aster.n.01', 'name': 'Eastern_silvery_aster'}, {'id': 18399, 'synset': 'flat-topped_white_aster.n.01', 'name': 'flat-topped_white_aster'}, {'id': 18400, 'synset': 'late_purple_aster.n.01', 'name': 'late_purple_aster'}, {'id': 18401, 'synset': 'panicled_aster.n.01', 'name': 'panicled_aster'}, {'id': 18402, 'synset': 'perennial_salt_marsh_aster.n.01', 'name': 'perennial_salt_marsh_aster'}, {'id': 18403, 'synset': 'purple-stemmed_aster.n.01', 'name': 'purple-stemmed_aster'}, {'id': 18404, 'synset': 'rough-leaved_aster.n.01', 'name': 'rough-leaved_aster'}, {'id': 18405, 'synset': 'rush_aster.n.01', 'name': 'rush_aster'}, {'id': 18406, 'synset': "schreiber's_aster.n.01", 'name': "Schreiber's_aster"}, {'id': 18407, 'synset': 'small_white_aster.n.01', 'name': 'small_white_aster'}, {'id': 18408, 'synset': 'smooth_aster.n.01', 'name': 'smooth_aster'}, {'id': 18409, 'synset': 'southern_aster.n.01', 'name': 'southern_aster'}, {'id': 18410, 'synset': 'starved_aster.n.01', 'name': 'starved_aster'}, {'id': 18411, 'synset': "tradescant's_aster.n.01", 'name': "tradescant's_aster"}, {'id': 18412, 'synset': 'wavy-leaved_aster.n.01', 'name': 'wavy-leaved_aster'}, {'id': 18413, 'synset': 'western_silvery_aster.n.01', 'name': 'Western_silvery_aster'}, {'id': 18414, 'synset': 'willow_aster.n.01', 'name': 'willow_aster'}, {'id': 18415, 'synset': 'ayapana.n.01', 'name': 'ayapana'}, {'id': 18416, 'synset': 'mule_fat.n.01', 'name': 'mule_fat'}, {'id': 18417, 'synset': 'balsamroot.n.01', 'name': 
'balsamroot'}, {'id': 18418, 'synset': 'daisy.n.01', 'name': 'daisy'}, {'id': 18419, 'synset': 'common_daisy.n.01', 'name': 'common_daisy'}, {'id': 18420, 'synset': 'bur_marigold.n.01', 'name': 'bur_marigold'}, {'id': 18421, 'synset': 'spanish_needles.n.02', 'name': 'Spanish_needles'}, {'id': 18422, 'synset': 'tickseed_sunflower.n.01', 'name': 'tickseed_sunflower'}, {'id': 18423, 'synset': 'european_beggar-ticks.n.01', 'name': 'European_beggar-ticks'}, {'id': 18424, 'synset': 'slender_knapweed.n.01', 'name': 'slender_knapweed'}, {'id': 18425, 'synset': 'false_chamomile.n.01', 'name': 'false_chamomile'}, {'id': 18426, 'synset': 'swan_river_daisy.n.01', 'name': 'Swan_River_daisy'}, {'id': 18427, 'synset': 'woodland_oxeye.n.01', 'name': 'woodland_oxeye'}, {'id': 18428, 'synset': 'indian_plantain.n.01', 'name': 'Indian_plantain'}, {'id': 18429, 'synset': 'calendula.n.01', 'name': 'calendula'}, {'id': 18430, 'synset': 'common_marigold.n.01', 'name': 'common_marigold'}, {'id': 18431, 'synset': 'china_aster.n.01', 'name': 'China_aster'}, {'id': 18432, 'synset': 'thistle.n.01', 'name': 'thistle'}, {'id': 18433, 'synset': 'welted_thistle.n.01', 'name': 'welted_thistle'}, {'id': 18434, 'synset': 'musk_thistle.n.01', 'name': 'musk_thistle'}, {'id': 18435, 'synset': 'carline_thistle.n.01', 'name': 'carline_thistle'}, {'id': 18436, 'synset': 'stemless_carline_thistle.n.01', 'name': 'stemless_carline_thistle'}, {'id': 18437, 'synset': 'common_carline_thistle.n.01', 'name': 'common_carline_thistle'}, {'id': 18438, 'synset': 'safflower.n.01', 'name': 'safflower'}, {'id': 18439, 'synset': 'safflower_seed.n.01', 'name': 'safflower_seed'}, {'id': 18440, 'synset': 'catananche.n.01', 'name': 'catananche'}, {'id': 18441, 'synset': 'blue_succory.n.01', 'name': 'blue_succory'}, {'id': 18442, 'synset': 'centaury.n.02', 'name': 'centaury'}, {'id': 18443, 'synset': 'dusty_miller.n.03', 'name': 'dusty_miller'}, {'id': 18444, 'synset': 'cornflower.n.02', 'name': 'cornflower'}, {'id': 18445, 'synset': 'star-thistle.n.01', 'name': 'star-thistle'}, {'id': 18446, 'synset': 'knapweed.n.01', 'name': 'knapweed'}, {'id': 18447, 'synset': 'sweet_sultan.n.02', 'name': 'sweet_sultan'}, {'id': 18448, 'synset': 'great_knapweed.n.01', 'name': 'great_knapweed'}, {'id': 18449, 'synset': "barnaby's_thistle.n.01", 'name': "Barnaby's_thistle"}, {'id': 18450, 'synset': 'chamomile.n.01', 'name': 'chamomile'}, {'id': 18451, 'synset': 'chaenactis.n.01', 'name': 'chaenactis'}, {'id': 18452, 'synset': 'chrysanthemum.n.02', 'name': 'chrysanthemum'}, {'id': 18453, 'synset': 'corn_marigold.n.01', 'name': 'corn_marigold'}, {'id': 18454, 'synset': 'crown_daisy.n.01', 'name': 'crown_daisy'}, {'id': 18455, 'synset': 'chop-suey_greens.n.01', 'name': 'chop-suey_greens'}, {'id': 18456, 'synset': 'golden_aster.n.01', 'name': 'golden_aster'}, {'id': 18457, 'synset': 'maryland_golden_aster.n.01', 'name': 'Maryland_golden_aster'}, {'id': 18458, 'synset': 'goldenbush.n.02', 'name': 'goldenbush'}, {'id': 18459, 'synset': 'rabbit_brush.n.01', 'name': 'rabbit_brush'}, {'id': 18460, 'synset': 'chicory.n.02', 'name': 'chicory'}, {'id': 18461, 'synset': 'endive.n.01', 'name': 'endive'}, {'id': 18462, 'synset': 'chicory.n.01', 'name': 'chicory'}, {'id': 18463, 'synset': 'plume_thistle.n.01', 'name': 'plume_thistle'}, {'id': 18464, 'synset': 'canada_thistle.n.01', 'name': 'Canada_thistle'}, {'id': 18465, 'synset': 'field_thistle.n.01', 'name': 'field_thistle'}, {'id': 18466, 'synset': 'woolly_thistle.n.02', 'name': 'woolly_thistle'}, {'id': 18467, 'synset': 
'european_woolly_thistle.n.01', 'name': 'European_woolly_thistle'}, {'id': 18468, 'synset': 'melancholy_thistle.n.01', 'name': 'melancholy_thistle'}, {'id': 18469, 'synset': 'brook_thistle.n.01', 'name': 'brook_thistle'}, {'id': 18470, 'synset': 'bull_thistle.n.01', 'name': 'bull_thistle'}, {'id': 18471, 'synset': 'blessed_thistle.n.02', 'name': 'blessed_thistle'}, {'id': 18472, 'synset': 'mistflower.n.01', 'name': 'mistflower'}, {'id': 18473, 'synset': 'horseweed.n.02', 'name': 'horseweed'}, {'id': 18474, 'synset': 'coreopsis.n.01', 'name': 'coreopsis'}, {'id': 18475, 'synset': 'giant_coreopsis.n.01', 'name': 'giant_coreopsis'}, {'id': 18476, 'synset': 'sea_dahlia.n.01', 'name': 'sea_dahlia'}, {'id': 18477, 'synset': 'calliopsis.n.01', 'name': 'calliopsis'}, {'id': 18478, 'synset': 'cosmos.n.02', 'name': 'cosmos'}, {'id': 18479, 'synset': 'brass_buttons.n.01', 'name': 'brass_buttons'}, {'id': 18480, 'synset': 'billy_buttons.n.01', 'name': 'billy_buttons'}, {'id': 18481, 'synset': "hawk's-beard.n.01", 'name': "hawk's-beard"}, {'id': 18482, 'synset': 'artichoke.n.01', 'name': 'artichoke'}, {'id': 18483, 'synset': 'cardoon.n.01', 'name': 'cardoon'}, {'id': 18484, 'synset': 'dahlia.n.01', 'name': 'dahlia'}, {'id': 18485, 'synset': 'german_ivy.n.01', 'name': 'German_ivy'}, {'id': 18486, 'synset': "florist's_chrysanthemum.n.01", 'name': "florist's_chrysanthemum"}, {'id': 18487, 'synset': 'cape_marigold.n.01', 'name': 'cape_marigold'}, {'id': 18488, 'synset': "leopard's-bane.n.01", 'name': "leopard's-bane"}, {'id': 18489, 'synset': 'coneflower.n.03', 'name': 'coneflower'}, {'id': 18490, 'synset': 'globe_thistle.n.01', 'name': 'globe_thistle'}, {'id': 18491, 'synset': "elephant's-foot.n.02", 'name': "elephant's-foot"}, {'id': 18492, 'synset': 'tassel_flower.n.01', 'name': 'tassel_flower'}, {'id': 18493, 'synset': 'brittlebush.n.01', 'name': 'brittlebush'}, {'id': 18494, 'synset': 'sunray.n.02', 'name': 'sunray'}, {'id': 18495, 'synset': 'engelmannia.n.01', 'name': 'engelmannia'}, {'id': 18496, 'synset': 'fireweed.n.02', 'name': 'fireweed'}, {'id': 18497, 'synset': 'fleabane.n.02', 'name': 'fleabane'}, {'id': 18498, 'synset': 'blue_fleabane.n.01', 'name': 'blue_fleabane'}, {'id': 18499, 'synset': 'daisy_fleabane.n.01', 'name': 'daisy_fleabane'}, {'id': 18500, 'synset': 'orange_daisy.n.01', 'name': 'orange_daisy'}, {'id': 18501, 'synset': 'spreading_fleabane.n.01', 'name': 'spreading_fleabane'}, {'id': 18502, 'synset': 'seaside_daisy.n.01', 'name': 'seaside_daisy'}, {'id': 18503, 'synset': 'philadelphia_fleabane.n.01', 'name': 'Philadelphia_fleabane'}, {'id': 18504, 'synset': "robin's_plantain.n.01", 'name': "robin's_plantain"}, {'id': 18505, 'synset': 'showy_daisy.n.01', 'name': 'showy_daisy'}, {'id': 18506, 'synset': 'woolly_sunflower.n.01', 'name': 'woolly_sunflower'}, {'id': 18507, 'synset': 'golden_yarrow.n.01', 'name': 'golden_yarrow'}, {'id': 18508, 'synset': 'dog_fennel.n.01', 'name': 'dog_fennel'}, {'id': 18509, 'synset': 'joe-pye_weed.n.02', 'name': 'Joe-Pye_weed'}, {'id': 18510, 'synset': 'boneset.n.02', 'name': 'boneset'}, {'id': 18511, 'synset': 'joe-pye_weed.n.01', 'name': 'Joe-Pye_weed'}, {'id': 18512, 'synset': 'blue_daisy.n.01', 'name': 'blue_daisy'}, {'id': 18513, 'synset': 'kingfisher_daisy.n.01', 'name': 'kingfisher_daisy'}, {'id': 18514, 'synset': 'cotton_rose.n.02', 'name': 'cotton_rose'}, {'id': 18515, 'synset': 'herba_impia.n.01', 'name': 'herba_impia'}, {'id': 18516, 'synset': 'gaillardia.n.01', 'name': 'gaillardia'}, {'id': 18517, 'synset': 'gazania.n.01', 'name': 
'gazania'}, {'id': 18518, 'synset': 'treasure_flower.n.01', 'name': 'treasure_flower'}, {'id': 18519, 'synset': 'african_daisy.n.02', 'name': 'African_daisy'}, {'id': 18520, 'synset': 'barberton_daisy.n.01', 'name': 'Barberton_daisy'}, {'id': 18521, 'synset': 'desert_sunflower.n.01', 'name': 'desert_sunflower'}, {'id': 18522, 'synset': 'cudweed.n.01', 'name': 'cudweed'}, {'id': 18523, 'synset': 'chafeweed.n.01', 'name': 'chafeweed'}, {'id': 18524, 'synset': 'gumweed.n.01', 'name': 'gumweed'}, {'id': 18525, 'synset': 'grindelia_robusta.n.01', 'name': 'Grindelia_robusta'}, {'id': 18526, 'synset': 'curlycup_gumweed.n.01', 'name': 'curlycup_gumweed'}, {'id': 18527, 'synset': 'little-head_snakeweed.n.01', 'name': 'little-head_snakeweed'}, {'id': 18528, 'synset': 'rabbitweed.n.01', 'name': 'rabbitweed'}, {'id': 18529, 'synset': 'broomweed.n.01', 'name': 'broomweed'}, {'id': 18530, 'synset': 'velvet_plant.n.02', 'name': 'velvet_plant'}, {'id': 18531, 'synset': 'goldenbush.n.01', 'name': 'goldenbush'}, {'id': 18532, 'synset': 'camphor_daisy.n.01', 'name': 'camphor_daisy'}, {'id': 18533, 'synset': 'yellow_spiny_daisy.n.01', 'name': 'yellow_spiny_daisy'}, {'id': 18534, 'synset': 'hoary_golden_bush.n.01', 'name': 'hoary_golden_bush'}, {'id': 18535, 'synset': 'sneezeweed.n.01', 'name': 'sneezeweed'}, {'id': 18536, 'synset': 'orange_sneezeweed.n.01', 'name': 'orange_sneezeweed'}, {'id': 18537, 'synset': 'rosilla.n.01', 'name': 'rosilla'}, {'id': 18538, 'synset': 'swamp_sunflower.n.01', 'name': 'swamp_sunflower'}, {'id': 18539, 'synset': 'common_sunflower.n.01', 'name': 'common_sunflower'}, {'id': 18540, 'synset': 'giant_sunflower.n.01', 'name': 'giant_sunflower'}, {'id': 18541, 'synset': 'showy_sunflower.n.01', 'name': 'showy_sunflower'}, {'id': 18542, 'synset': "maximilian's_sunflower.n.01", 'name': "Maximilian's_sunflower"}, {'id': 18543, 'synset': 'prairie_sunflower.n.01', 'name': 'prairie_sunflower'}, {'id': 18544, 'synset': 'jerusalem_artichoke.n.02', 'name': 'Jerusalem_artichoke'}, {'id': 18545, 'synset': 'jerusalem_artichoke.n.01', 'name': 'Jerusalem_artichoke'}, {'id': 18546, 'synset': 'strawflower.n.03', 'name': 'strawflower'}, {'id': 18547, 'synset': 'heliopsis.n.01', 'name': 'heliopsis'}, {'id': 18548, 'synset': 'strawflower.n.02', 'name': 'strawflower'}, {'id': 18549, 'synset': 'hairy_golden_aster.n.01', 'name': 'hairy_golden_aster'}, {'id': 18550, 'synset': 'hawkweed.n.02', 'name': 'hawkweed'}, {'id': 18551, 'synset': 'rattlesnake_weed.n.01', 'name': 'rattlesnake_weed'}, {'id': 18552, 'synset': 'alpine_coltsfoot.n.01', 'name': 'alpine_coltsfoot'}, {'id': 18553, 'synset': 'alpine_gold.n.01', 'name': 'alpine_gold'}, {'id': 18554, 'synset': 'dwarf_hulsea.n.01', 'name': 'dwarf_hulsea'}, {'id': 18555, 'synset': "cat's-ear.n.02", 'name': "cat's-ear"}, {'id': 18556, 'synset': 'inula.n.01', 'name': 'inula'}, {'id': 18557, 'synset': 'marsh_elder.n.01', 'name': 'marsh_elder'}, {'id': 18558, 'synset': 'burweed_marsh_elder.n.01', 'name': 'burweed_marsh_elder'}, {'id': 18559, 'synset': 'krigia.n.01', 'name': 'krigia'}, {'id': 18560, 'synset': 'dwarf_dandelion.n.01', 'name': 'dwarf_dandelion'}, {'id': 18561, 'synset': 'garden_lettuce.n.01', 'name': 'garden_lettuce'}, {'id': 18562, 'synset': 'cos_lettuce.n.01', 'name': 'cos_lettuce'}, {'id': 18563, 'synset': 'leaf_lettuce.n.01', 'name': 'leaf_lettuce'}, {'id': 18564, 'synset': 'celtuce.n.01', 'name': 'celtuce'}, {'id': 18565, 'synset': 'prickly_lettuce.n.01', 'name': 'prickly_lettuce'}, {'id': 18566, 'synset': 'goldfields.n.01', 'name': 'goldfields'}, 
{'id': 18567, 'synset': 'tidytips.n.01', 'name': 'tidytips'}, {'id': 18568, 'synset': 'hawkbit.n.01', 'name': 'hawkbit'}, {'id': 18569, 'synset': 'fall_dandelion.n.01', 'name': 'fall_dandelion'}, {'id': 18570, 'synset': 'edelweiss.n.01', 'name': 'edelweiss'}, {'id': 18571, 'synset': 'oxeye_daisy.n.02', 'name': 'oxeye_daisy'}, {'id': 18572, 'synset': 'oxeye_daisy.n.01', 'name': 'oxeye_daisy'}, {'id': 18573, 'synset': 'shasta_daisy.n.01', 'name': 'shasta_daisy'}, {'id': 18574, 'synset': 'pyrenees_daisy.n.01', 'name': 'Pyrenees_daisy'}, {'id': 18575, 'synset': 'north_island_edelweiss.n.01', 'name': 'north_island_edelweiss'}, {'id': 18576, 'synset': 'blazing_star.n.02', 'name': 'blazing_star'}, {'id': 18577, 'synset': 'dotted_gayfeather.n.01', 'name': 'dotted_gayfeather'}, {'id': 18578, 'synset': 'dense_blazing_star.n.01', 'name': 'dense_blazing_star'}, {'id': 18579, 'synset': 'texas_star.n.02', 'name': 'Texas_star'}, {'id': 18580, 'synset': 'african_daisy.n.01', 'name': 'African_daisy'}, {'id': 18581, 'synset': 'tahoka_daisy.n.01', 'name': 'tahoka_daisy'}, {'id': 18582, 'synset': 'sticky_aster.n.01', 'name': 'sticky_aster'}, {'id': 18583, 'synset': 'mojave_aster.n.01', 'name': 'Mojave_aster'}, {'id': 18584, 'synset': 'tarweed.n.01', 'name': 'tarweed'}, {'id': 18585, 'synset': 'sweet_false_chamomile.n.01', 'name': 'sweet_false_chamomile'}, {'id': 18586, 'synset': 'pineapple_weed.n.01', 'name': 'pineapple_weed'}, {'id': 18587, 'synset': 'climbing_hempweed.n.01', 'name': 'climbing_hempweed'}, {'id': 18588, 'synset': 'mutisia.n.01', 'name': 'mutisia'}, {'id': 18589, 'synset': 'rattlesnake_root.n.02', 'name': 'rattlesnake_root'}, {'id': 18590, 'synset': 'white_lettuce.n.01', 'name': 'white_lettuce'}, {'id': 18591, 'synset': 'daisybush.n.01', 'name': 'daisybush'}, {'id': 18592, 'synset': 'new_zealand_daisybush.n.01', 'name': 'New_Zealand_daisybush'}, {'id': 18593, 'synset': 'cotton_thistle.n.01', 'name': 'cotton_thistle'}, {'id': 18594, 'synset': 'othonna.n.01', 'name': 'othonna'}, {'id': 18595, 'synset': 'cascade_everlasting.n.01', 'name': 'cascade_everlasting'}, {'id': 18596, 'synset': 'butterweed.n.02', 'name': 'butterweed'}, {'id': 18597, 'synset': 'american_feverfew.n.01', 'name': 'American_feverfew'}, {'id': 18598, 'synset': 'cineraria.n.01', 'name': 'cineraria'}, {'id': 18599, 'synset': "florest's_cineraria.n.01", 'name': "florest's_cineraria"}, {'id': 18600, 'synset': 'butterbur.n.01', 'name': 'butterbur'}, {'id': 18601, 'synset': 'winter_heliotrope.n.01', 'name': 'winter_heliotrope'}, {'id': 18602, 'synset': 'sweet_coltsfoot.n.01', 'name': 'sweet_coltsfoot'}, {'id': 18603, 'synset': 'oxtongue.n.01', 'name': 'oxtongue'}, {'id': 18604, 'synset': 'hawkweed.n.01', 'name': 'hawkweed'}, {'id': 18605, 'synset': 'mouse-ear_hawkweed.n.01', 'name': 'mouse-ear_hawkweed'}, {'id': 18606, 'synset': 'stevia.n.02', 'name': 'stevia'}, {'id': 18607, 'synset': 'rattlesnake_root.n.01', 'name': 'rattlesnake_root'}, {'id': 18608, 'synset': 'fleabane.n.01', 'name': 'fleabane'}, {'id': 18609, 'synset': 'sheep_plant.n.01', 'name': 'sheep_plant'}, {'id': 18610, 'synset': 'coneflower.n.02', 'name': 'coneflower'}, {'id': 18611, 'synset': 'mexican_hat.n.01', 'name': 'Mexican_hat'}, {'id': 18612, 'synset': 'long-head_coneflower.n.01', 'name': 'long-head_coneflower'}, {'id': 18613, 'synset': 'prairie_coneflower.n.01', 'name': 'prairie_coneflower'}, {'id': 18614, 'synset': 'swan_river_everlasting.n.01', 'name': 'Swan_River_everlasting'}, {'id': 18615, 'synset': 'coneflower.n.01', 'name': 'coneflower'}, {'id': 18616, 
'synset': 'black-eyed_susan.n.03', 'name': 'black-eyed_Susan'}, {'id': 18617, 'synset': 'cutleaved_coneflower.n.01', 'name': 'cutleaved_coneflower'}, {'id': 18618, 'synset': 'golden_glow.n.01', 'name': 'golden_glow'}, {'id': 18619, 'synset': 'lavender_cotton.n.01', 'name': 'lavender_cotton'}, {'id': 18620, 'synset': 'creeping_zinnia.n.01', 'name': 'creeping_zinnia'}, {'id': 18621, 'synset': 'golden_thistle.n.01', 'name': 'golden_thistle'}, {'id': 18622, 'synset': 'spanish_oyster_plant.n.01', 'name': 'Spanish_oyster_plant'}, {'id': 18623, 'synset': 'nodding_groundsel.n.01', 'name': 'nodding_groundsel'}, {'id': 18624, 'synset': 'dusty_miller.n.02', 'name': 'dusty_miller'}, {'id': 18625, 'synset': 'butterweed.n.01', 'name': 'butterweed'}, {'id': 18626, 'synset': 'ragwort.n.01', 'name': 'ragwort'}, {'id': 18627, 'synset': 'arrowleaf_groundsel.n.01', 'name': 'arrowleaf_groundsel'}, {'id': 18628, 'synset': 'black_salsify.n.01', 'name': 'black_salsify'}, {'id': 18629, 'synset': 'white-topped_aster.n.01', 'name': 'white-topped_aster'}, {'id': 18630, 'synset': 'narrow-leaved_white-topped_aster.n.01', 'name': 'narrow-leaved_white-topped_aster'}, {'id': 18631, 'synset': 'silver_sage.n.01', 'name': 'silver_sage'}, {'id': 18632, 'synset': 'sea_wormwood.n.01', 'name': 'sea_wormwood'}, {'id': 18633, 'synset': 'sawwort.n.01', 'name': 'sawwort'}, {'id': 18634, 'synset': 'rosinweed.n.01', 'name': 'rosinweed'}, {'id': 18635, 'synset': 'milk_thistle.n.02', 'name': 'milk_thistle'}, {'id': 18636, 'synset': 'goldenrod.n.01', 'name': 'goldenrod'}, {'id': 18637, 'synset': 'silverrod.n.01', 'name': 'silverrod'}, {'id': 18638, 'synset': 'meadow_goldenrod.n.01', 'name': 'meadow_goldenrod'}, {'id': 18639, 'synset': 'missouri_goldenrod.n.01', 'name': 'Missouri_goldenrod'}, {'id': 18640, 'synset': 'alpine_goldenrod.n.01', 'name': 'alpine_goldenrod'}, {'id': 18641, 'synset': 'grey_goldenrod.n.01', 'name': 'grey_goldenrod'}, {'id': 18642, 'synset': 'blue_mountain_tea.n.01', 'name': 'Blue_Mountain_tea'}, {'id': 18643, 'synset': "dyer's_weed.n.01", 'name': "dyer's_weed"}, {'id': 18644, 'synset': 'seaside_goldenrod.n.01', 'name': 'seaside_goldenrod'}, {'id': 18645, 'synset': 'narrow_goldenrod.n.01', 'name': 'narrow_goldenrod'}, {'id': 18646, 'synset': "boott's_goldenrod.n.01", 'name': "Boott's_goldenrod"}, {'id': 18647, 'synset': "elliott's_goldenrod.n.01", 'name': "Elliott's_goldenrod"}, {'id': 18648, 'synset': 'ohio_goldenrod.n.01', 'name': 'Ohio_goldenrod'}, {'id': 18649, 'synset': 'rough-stemmed_goldenrod.n.01', 'name': 'rough-stemmed_goldenrod'}, {'id': 18650, 'synset': 'showy_goldenrod.n.01', 'name': 'showy_goldenrod'}, {'id': 18651, 'synset': 'tall_goldenrod.n.01', 'name': 'tall_goldenrod'}, {'id': 18652, 'synset': 'zigzag_goldenrod.n.01', 'name': 'zigzag_goldenrod'}, {'id': 18653, 'synset': 'sow_thistle.n.01', 'name': 'sow_thistle'}, {'id': 18654, 'synset': 'milkweed.n.02', 'name': 'milkweed'}, {'id': 18655, 'synset': 'stevia.n.01', 'name': 'stevia'}, {'id': 18656, 'synset': "stokes'_aster.n.01", 'name': "stokes'_aster"}, {'id': 18657, 'synset': 'marigold.n.01', 'name': 'marigold'}, {'id': 18658, 'synset': 'african_marigold.n.01', 'name': 'African_marigold'}, {'id': 18659, 'synset': 'french_marigold.n.01', 'name': 'French_marigold'}, {'id': 18660, 'synset': 'painted_daisy.n.01', 'name': 'painted_daisy'}, {'id': 18661, 'synset': 'pyrethrum.n.02', 'name': 'pyrethrum'}, {'id': 18662, 'synset': 'northern_dune_tansy.n.01', 'name': 'northern_dune_tansy'}, {'id': 18663, 'synset': 'feverfew.n.01', 'name': 'feverfew'}, {'id': 
18664, 'synset': 'dusty_miller.n.01', 'name': 'dusty_miller'}, {'id': 18665, 'synset': 'tansy.n.01', 'name': 'tansy'}, {'id': 18666, 'synset': 'dandelion.n.01', 'name': 'dandelion'}, {'id': 18667, 'synset': 'common_dandelion.n.01', 'name': 'common_dandelion'}, {'id': 18668, 'synset': 'dandelion_green.n.01', 'name': 'dandelion_green'}, {'id': 18669, 'synset': 'russian_dandelion.n.01', 'name': 'Russian_dandelion'}, {'id': 18670, 'synset': 'stemless_hymenoxys.n.01', 'name': 'stemless_hymenoxys'}, {'id': 18671, 'synset': 'mexican_sunflower.n.01', 'name': 'Mexican_sunflower'}, {'id': 18672, 'synset': 'easter_daisy.n.01', 'name': 'Easter_daisy'}, {'id': 18673, 'synset': 'yellow_salsify.n.01', 'name': 'yellow_salsify'}, {'id': 18674, 'synset': 'salsify.n.02', 'name': 'salsify'}, {'id': 18675, 'synset': 'meadow_salsify.n.01', 'name': 'meadow_salsify'}, {'id': 18676, 'synset': 'scentless_camomile.n.01', 'name': 'scentless_camomile'}, {'id': 18677, 'synset': 'turfing_daisy.n.01', 'name': 'turfing_daisy'}, {'id': 18678, 'synset': 'coltsfoot.n.02', 'name': 'coltsfoot'}, {'id': 18679, 'synset': 'ursinia.n.01', 'name': 'ursinia'}, {'id': 18680, 'synset': 'crownbeard.n.01', 'name': 'crownbeard'}, {'id': 18681, 'synset': 'wingstem.n.01', 'name': 'wingstem'}, {'id': 18682, 'synset': 'cowpen_daisy.n.01', 'name': 'cowpen_daisy'}, {'id': 18683, 'synset': 'gravelweed.n.01', 'name': 'gravelweed'}, {'id': 18684, 'synset': 'virginia_crownbeard.n.01', 'name': 'Virginia_crownbeard'}, {'id': 18685, 'synset': 'ironweed.n.01', 'name': 'ironweed'}, {'id': 18686, 'synset': "mule's_ears.n.01", 'name': "mule's_ears"}, {'id': 18687, 'synset': "white-rayed_mule's_ears.n.01", 'name': "white-rayed_mule's_ears"}, {'id': 18688, 'synset': 'cocklebur.n.01', 'name': 'cocklebur'}, {'id': 18689, 'synset': 'xeranthemum.n.01', 'name': 'xeranthemum'}, {'id': 18690, 'synset': 'immortelle.n.01', 'name': 'immortelle'}, {'id': 18691, 'synset': 'zinnia.n.01', 'name': 'zinnia'}, {'id': 18692, 'synset': 'white_zinnia.n.01', 'name': 'white_zinnia'}, {'id': 18693, 'synset': 'little_golden_zinnia.n.01', 'name': 'little_golden_zinnia'}, {'id': 18694, 'synset': 'blazing_star.n.01', 'name': 'blazing_star'}, {'id': 18695, 'synset': 'bartonia.n.01', 'name': 'bartonia'}, {'id': 18696, 'synset': 'achene.n.01', 'name': 'achene'}, {'id': 18697, 'synset': 'samara.n.01', 'name': 'samara'}, {'id': 18698, 'synset': 'campanula.n.01', 'name': 'campanula'}, {'id': 18699, 'synset': 'creeping_bellflower.n.01', 'name': 'creeping_bellflower'}, {'id': 18700, 'synset': 'canterbury_bell.n.02', 'name': 'Canterbury_bell'}, {'id': 18701, 'synset': 'tall_bellflower.n.01', 'name': 'tall_bellflower'}, {'id': 18702, 'synset': 'marsh_bellflower.n.01', 'name': 'marsh_bellflower'}, {'id': 18703, 'synset': 'clustered_bellflower.n.01', 'name': 'clustered_bellflower'}, {'id': 18704, 'synset': 'peach_bells.n.01', 'name': 'peach_bells'}, {'id': 18705, 'synset': 'chimney_plant.n.01', 'name': 'chimney_plant'}, {'id': 18706, 'synset': 'rampion.n.01', 'name': 'rampion'}, {'id': 18707, 'synset': 'tussock_bellflower.n.01', 'name': 'tussock_bellflower'}, {'id': 18708, 'synset': 'orchid.n.01', 'name': 'orchid'}, {'id': 18709, 'synset': 'orchis.n.01', 'name': 'orchis'}, {'id': 18710, 'synset': 'male_orchis.n.01', 'name': 'male_orchis'}, {'id': 18711, 'synset': 'butterfly_orchid.n.05', 'name': 'butterfly_orchid'}, {'id': 18712, 'synset': 'showy_orchis.n.01', 'name': 'showy_orchis'}, {'id': 18713, 'synset': 'aerides.n.01', 'name': 'aerides'}, {'id': 18714, 'synset': 'angrecum.n.01', 'name': 
'angrecum'}, {'id': 18715, 'synset': 'jewel_orchid.n.01', 'name': 'jewel_orchid'}, {'id': 18716, 'synset': 'puttyroot.n.01', 'name': 'puttyroot'}, {'id': 18717, 'synset': 'arethusa.n.01', 'name': 'arethusa'}, {'id': 18718, 'synset': 'bog_rose.n.01', 'name': 'bog_rose'}, {'id': 18719, 'synset': 'bletia.n.01', 'name': 'bletia'}, {'id': 18720, 'synset': 'bletilla_striata.n.01', 'name': 'Bletilla_striata'}, {'id': 18721, 'synset': 'brassavola.n.01', 'name': 'brassavola'}, {'id': 18722, 'synset': 'spider_orchid.n.03', 'name': 'spider_orchid'}, {'id': 18723, 'synset': 'spider_orchid.n.02', 'name': 'spider_orchid'}, {'id': 18724, 'synset': 'caladenia.n.01', 'name': 'caladenia'}, {'id': 18725, 'synset': 'calanthe.n.01', 'name': 'calanthe'}, {'id': 18726, 'synset': 'grass_pink.n.01', 'name': 'grass_pink'}, {'id': 18727, 'synset': 'calypso.n.01', 'name': 'calypso'}, {'id': 18728, 'synset': 'cattleya.n.01', 'name': 'cattleya'}, {'id': 18729, 'synset': 'helleborine.n.03', 'name': 'helleborine'}, {'id': 18730, 'synset': 'red_helleborine.n.01', 'name': 'red_helleborine'}, {'id': 18731, 'synset': 'spreading_pogonia.n.01', 'name': 'spreading_pogonia'}, {'id': 18732, 'synset': 'rosebud_orchid.n.01', 'name': 'rosebud_orchid'}, {'id': 18733, 'synset': 'satyr_orchid.n.01', 'name': 'satyr_orchid'}, {'id': 18734, 'synset': 'frog_orchid.n.02', 'name': 'frog_orchid'}, {'id': 18735, 'synset': 'coelogyne.n.01', 'name': 'coelogyne'}, {'id': 18736, 'synset': 'coral_root.n.01', 'name': 'coral_root'}, {'id': 18737, 'synset': 'spotted_coral_root.n.01', 'name': 'spotted_coral_root'}, {'id': 18738, 'synset': 'striped_coral_root.n.01', 'name': 'striped_coral_root'}, {'id': 18739, 'synset': 'early_coral_root.n.01', 'name': 'early_coral_root'}, {'id': 18740, 'synset': 'swan_orchid.n.01', 'name': 'swan_orchid'}, {'id': 18741, 'synset': 'cymbid.n.01', 'name': 'cymbid'}, {'id': 18742, 'synset': 'cypripedia.n.01', 'name': 'cypripedia'}, {'id': 18743, 'synset': "lady's_slipper.n.01", 'name': "lady's_slipper"}, {'id': 18744, 'synset': 'moccasin_flower.n.01', 'name': 'moccasin_flower'}, {'id': 18745, 'synset': "common_lady's-slipper.n.01", 'name': "common_lady's-slipper"}, {'id': 18746, 'synset': "ram's-head.n.01", 'name': "ram's-head"}, {'id': 18747, 'synset': "yellow_lady's_slipper.n.01", 'name': "yellow_lady's_slipper"}, {'id': 18748, 'synset': "large_yellow_lady's_slipper.n.01", 'name': "large_yellow_lady's_slipper"}, {'id': 18749, 'synset': "california_lady's_slipper.n.01", 'name': "California_lady's_slipper"}, {'id': 18750, 'synset': "clustered_lady's_slipper.n.01", 'name': "clustered_lady's_slipper"}, {'id': 18751, 'synset': "mountain_lady's_slipper.n.01", 'name': "mountain_lady's_slipper"}, {'id': 18752, 'synset': 'marsh_orchid.n.01', 'name': 'marsh_orchid'}, {'id': 18753, 'synset': 'common_spotted_orchid.n.01', 'name': 'common_spotted_orchid'}, {'id': 18754, 'synset': 'dendrobium.n.01', 'name': 'dendrobium'}, {'id': 18755, 'synset': 'disa.n.01', 'name': 'disa'}, {'id': 18756, 'synset': 'phantom_orchid.n.01', 'name': 'phantom_orchid'}, {'id': 18757, 'synset': 'tulip_orchid.n.01', 'name': 'tulip_orchid'}, {'id': 18758, 'synset': 'butterfly_orchid.n.04', 'name': 'butterfly_orchid'}, {'id': 18759, 'synset': 'butterfly_orchid.n.03', 'name': 'butterfly_orchid'}, {'id': 18760, 'synset': 'epidendron.n.01', 'name': 'epidendron'}, {'id': 18761, 'synset': 'helleborine.n.02', 'name': 'helleborine'}, {'id': 18762, 'synset': 'epipactis_helleborine.n.01', 'name': 'Epipactis_helleborine'}, {'id': 18763, 'synset': 'stream_orchid.n.01', 
'name': 'stream_orchid'}, {'id': 18764, 'synset': 'tongueflower.n.01', 'name': 'tongueflower'}, {'id': 18765, 'synset': 'rattlesnake_plantain.n.01', 'name': 'rattlesnake_plantain'}, {'id': 18766, 'synset': 'fragrant_orchid.n.01', 'name': 'fragrant_orchid'}, {'id': 18767, 'synset': 'short-spurred_fragrant_orchid.n.01', 'name': 'short-spurred_fragrant_orchid'}, {'id': 18768, 'synset': 'fringed_orchis.n.01', 'name': 'fringed_orchis'}, {'id': 18769, 'synset': 'frog_orchid.n.01', 'name': 'frog_orchid'}, {'id': 18770, 'synset': 'rein_orchid.n.01', 'name': 'rein_orchid'}, {'id': 18771, 'synset': 'bog_rein_orchid.n.01', 'name': 'bog_rein_orchid'}, {'id': 18772, 'synset': 'white_fringed_orchis.n.01', 'name': 'white_fringed_orchis'}, {'id': 18773, 'synset': 'elegant_habenaria.n.01', 'name': 'elegant_Habenaria'}, {'id': 18774, 'synset': 'purple-fringed_orchid.n.02', 'name': 'purple-fringed_orchid'}, {'id': 18775, 'synset': 'coastal_rein_orchid.n.01', 'name': 'coastal_rein_orchid'}, {'id': 18776, 'synset': "hooker's_orchid.n.01", 'name': "Hooker's_orchid"}, {'id': 18777, 'synset': 'ragged_orchid.n.01', 'name': 'ragged_orchid'}, {'id': 18778, 'synset': 'prairie_orchid.n.01', 'name': 'prairie_orchid'}, {'id': 18779, 'synset': 'snowy_orchid.n.01', 'name': 'snowy_orchid'}, {'id': 18780, 'synset': 'round-leaved_rein_orchid.n.01', 'name': 'round-leaved_rein_orchid'}, {'id': 18781, 'synset': 'purple_fringeless_orchid.n.01', 'name': 'purple_fringeless_orchid'}, {'id': 18782, 'synset': 'purple-fringed_orchid.n.01', 'name': 'purple-fringed_orchid'}, {'id': 18783, 'synset': 'alaska_rein_orchid.n.01', 'name': 'Alaska_rein_orchid'}, {'id': 18784, 'synset': 'crested_coral_root.n.01', 'name': 'crested_coral_root'}, {'id': 18785, 'synset': 'texas_purple_spike.n.01', 'name': 'Texas_purple_spike'}, {'id': 18786, 'synset': 'lizard_orchid.n.01', 'name': 'lizard_orchid'}, {'id': 18787, 'synset': 'laelia.n.01', 'name': 'laelia'}, {'id': 18788, 'synset': 'liparis.n.01', 'name': 'liparis'}, {'id': 18789, 'synset': 'twayblade.n.02', 'name': 'twayblade'}, {'id': 18790, 'synset': 'fen_orchid.n.01', 'name': 'fen_orchid'}, {'id': 18791, 'synset': 'broad-leaved_twayblade.n.01', 'name': 'broad-leaved_twayblade'}, {'id': 18792, 'synset': 'lesser_twayblade.n.01', 'name': 'lesser_twayblade'}, {'id': 18793, 'synset': 'twayblade.n.01', 'name': 'twayblade'}, {'id': 18794, 'synset': "green_adder's_mouth.n.01", 'name': "green_adder's_mouth"}, {'id': 18795, 'synset': 'masdevallia.n.01', 'name': 'masdevallia'}, {'id': 18796, 'synset': 'maxillaria.n.01', 'name': 'maxillaria'}, {'id': 18797, 'synset': 'pansy_orchid.n.01', 'name': 'pansy_orchid'}, {'id': 18798, 'synset': 'odontoglossum.n.01', 'name': 'odontoglossum'}, {'id': 18799, 'synset': 'oncidium.n.01', 'name': 'oncidium'}, {'id': 18800, 'synset': 'bee_orchid.n.01', 'name': 'bee_orchid'}, {'id': 18801, 'synset': 'fly_orchid.n.02', 'name': 'fly_orchid'}, {'id': 18802, 'synset': 'spider_orchid.n.01', 'name': 'spider_orchid'}, {'id': 18803, 'synset': 'early_spider_orchid.n.01', 'name': 'early_spider_orchid'}, {'id': 18804, 'synset': "venus'_slipper.n.01", 'name': "Venus'_slipper"}, {'id': 18805, 'synset': 'phaius.n.01', 'name': 'phaius'}, {'id': 18806, 'synset': 'moth_orchid.n.01', 'name': 'moth_orchid'}, {'id': 18807, 'synset': 'butterfly_plant.n.01', 'name': 'butterfly_plant'}, {'id': 18808, 'synset': 'rattlesnake_orchid.n.01', 'name': 'rattlesnake_orchid'}, {'id': 18809, 'synset': 'lesser_butterfly_orchid.n.01', 'name': 'lesser_butterfly_orchid'}, {'id': 18810, 'synset': 
'greater_butterfly_orchid.n.01', 'name': 'greater_butterfly_orchid'}, {'id': 18811, 'synset': 'prairie_white-fringed_orchid.n.01', 'name': 'prairie_white-fringed_orchid'}, {'id': 18812, 'synset': 'tangle_orchid.n.01', 'name': 'tangle_orchid'}, {'id': 18813, 'synset': 'indian_crocus.n.01', 'name': 'Indian_crocus'}, {'id': 18814, 'synset': 'pleurothallis.n.01', 'name': 'pleurothallis'}, {'id': 18815, 'synset': 'pogonia.n.01', 'name': 'pogonia'}, {'id': 18816, 'synset': 'butterfly_orchid.n.01', 'name': 'butterfly_orchid'}, {'id': 18817, 'synset': 'psychopsis_krameriana.n.01', 'name': 'Psychopsis_krameriana'}, {'id': 18818, 'synset': 'psychopsis_papilio.n.01', 'name': 'Psychopsis_papilio'}, {'id': 18819, 'synset': 'helmet_orchid.n.01', 'name': 'helmet_orchid'}, {'id': 18820, 'synset': 'foxtail_orchid.n.01', 'name': 'foxtail_orchid'}, {'id': 18821, 'synset': 'orange-blossom_orchid.n.01', 'name': 'orange-blossom_orchid'}, {'id': 18822, 'synset': 'sobralia.n.01', 'name': 'sobralia'}, {'id': 18823, 'synset': "ladies'_tresses.n.01", 'name': "ladies'_tresses"}, {'id': 18824, 'synset': 'screw_augur.n.01', 'name': 'screw_augur'}, {'id': 18825, 'synset': "hooded_ladies'_tresses.n.01", 'name': "hooded_ladies'_tresses"}, {'id': 18826, 'synset': "western_ladies'_tresses.n.01", 'name': "western_ladies'_tresses"}, {'id': 18827, 'synset': "european_ladies'_tresses.n.01", 'name': "European_ladies'_tresses"}, {'id': 18828, 'synset': 'stanhopea.n.01', 'name': 'stanhopea'}, {'id': 18829, 'synset': 'stelis.n.01', 'name': 'stelis'}, {'id': 18830, 'synset': 'fly_orchid.n.01', 'name': 'fly_orchid'}, {'id': 18831, 'synset': 'vanda.n.01', 'name': 'vanda'}, {'id': 18832, 'synset': 'blue_orchid.n.01', 'name': 'blue_orchid'}, {'id': 18833, 'synset': 'vanilla.n.01', 'name': 'vanilla'}, {'id': 18834, 'synset': 'vanilla_orchid.n.01', 'name': 'vanilla_orchid'}, {'id': 18835, 'synset': 'yam.n.02', 'name': 'yam'}, {'id': 18836, 'synset': 'yam.n.01', 'name': 'yam'}, {'id': 18837, 'synset': 'white_yam.n.01', 'name': 'white_yam'}, {'id': 18838, 'synset': 'cinnamon_vine.n.01', 'name': 'cinnamon_vine'}, {'id': 18839, 'synset': "elephant's-foot.n.01", 'name': "elephant's-foot"}, {'id': 18840, 'synset': 'wild_yam.n.01', 'name': 'wild_yam'}, {'id': 18841, 'synset': 'cush-cush.n.01', 'name': 'cush-cush'}, {'id': 18842, 'synset': 'black_bryony.n.01', 'name': 'black_bryony'}, {'id': 18843, 'synset': 'primrose.n.01', 'name': 'primrose'}, {'id': 18844, 'synset': 'english_primrose.n.01', 'name': 'English_primrose'}, {'id': 18845, 'synset': 'cowslip.n.01', 'name': 'cowslip'}, {'id': 18846, 'synset': 'oxlip.n.01', 'name': 'oxlip'}, {'id': 18847, 'synset': 'chinese_primrose.n.01', 'name': 'Chinese_primrose'}, {'id': 18848, 'synset': 'polyanthus.n.01', 'name': 'polyanthus'}, {'id': 18849, 'synset': 'pimpernel.n.02', 'name': 'pimpernel'}, {'id': 18850, 'synset': 'scarlet_pimpernel.n.01', 'name': 'scarlet_pimpernel'}, {'id': 18851, 'synset': 'bog_pimpernel.n.01', 'name': 'bog_pimpernel'}, {'id': 18852, 'synset': 'chaffweed.n.01', 'name': 'chaffweed'}, {'id': 18853, 'synset': 'cyclamen.n.01', 'name': 'cyclamen'}, {'id': 18854, 'synset': 'sowbread.n.01', 'name': 'sowbread'}, {'id': 18855, 'synset': 'sea_milkwort.n.01', 'name': 'sea_milkwort'}, {'id': 18856, 'synset': 'featherfoil.n.01', 'name': 'featherfoil'}, {'id': 18857, 'synset': 'water_gillyflower.n.01', 'name': 'water_gillyflower'}, {'id': 18858, 'synset': 'water_violet.n.01', 'name': 'water_violet'}, {'id': 18859, 'synset': 'loosestrife.n.02', 'name': 'loosestrife'}, {'id': 18860, 'synset': 
'gooseneck_loosestrife.n.01', 'name': 'gooseneck_loosestrife'}, {'id': 18861, 'synset': 'yellow_pimpernel.n.01', 'name': 'yellow_pimpernel'}, {'id': 18862, 'synset': 'fringed_loosestrife.n.01', 'name': 'fringed_loosestrife'}, {'id': 18863, 'synset': 'moneywort.n.01', 'name': 'moneywort'}, {'id': 18864, 'synset': 'swamp_candles.n.01', 'name': 'swamp_candles'}, {'id': 18865, 'synset': 'whorled_loosestrife.n.01', 'name': 'whorled_loosestrife'}, {'id': 18866, 'synset': 'water_pimpernel.n.01', 'name': 'water_pimpernel'}, {'id': 18867, 'synset': 'brookweed.n.02', 'name': 'brookweed'}, {'id': 18868, 'synset': 'brookweed.n.01', 'name': 'brookweed'}, {'id': 18869, 'synset': 'coralberry.n.02', 'name': 'coralberry'}, {'id': 18870, 'synset': 'marlberry.n.01', 'name': 'marlberry'}, {'id': 18871, 'synset': 'plumbago.n.02', 'name': 'plumbago'}, {'id': 18872, 'synset': 'leadwort.n.01', 'name': 'leadwort'}, {'id': 18873, 'synset': 'thrift.n.01', 'name': 'thrift'}, {'id': 18874, 'synset': 'sea_lavender.n.01', 'name': 'sea_lavender'}, {'id': 18875, 'synset': 'barbasco.n.01', 'name': 'barbasco'}, {'id': 18876, 'synset': 'gramineous_plant.n.01', 'name': 'gramineous_plant'}, {'id': 18877, 'synset': 'grass.n.01', 'name': 'grass'}, {'id': 18878, 'synset': 'midgrass.n.01', 'name': 'midgrass'}, {'id': 18879, 'synset': 'shortgrass.n.01', 'name': 'shortgrass'}, {'id': 18880, 'synset': 'sword_grass.n.01', 'name': 'sword_grass'}, {'id': 18881, 'synset': 'tallgrass.n.01', 'name': 'tallgrass'}, {'id': 18882, 'synset': 'herbage.n.01', 'name': 'herbage'}, {'id': 18883, 'synset': 'goat_grass.n.01', 'name': 'goat_grass'}, {'id': 18884, 'synset': 'wheatgrass.n.01', 'name': 'wheatgrass'}, {'id': 18885, 'synset': 'crested_wheatgrass.n.01', 'name': 'crested_wheatgrass'}, {'id': 18886, 'synset': 'bearded_wheatgrass.n.01', 'name': 'bearded_wheatgrass'}, {'id': 18887, 'synset': 'western_wheatgrass.n.01', 'name': 'western_wheatgrass'}, {'id': 18888, 'synset': 'intermediate_wheatgrass.n.01', 'name': 'intermediate_wheatgrass'}, {'id': 18889, 'synset': 'slender_wheatgrass.n.01', 'name': 'slender_wheatgrass'}, {'id': 18890, 'synset': 'velvet_bent.n.01', 'name': 'velvet_bent'}, {'id': 18891, 'synset': 'cloud_grass.n.01', 'name': 'cloud_grass'}, {'id': 18892, 'synset': 'meadow_foxtail.n.01', 'name': 'meadow_foxtail'}, {'id': 18893, 'synset': 'foxtail.n.01', 'name': 'foxtail'}, {'id': 18894, 'synset': 'broom_grass.n.01', 'name': 'broom_grass'}, {'id': 18895, 'synset': 'broom_sedge.n.01', 'name': 'broom_sedge'}, {'id': 18896, 'synset': 'tall_oat_grass.n.01', 'name': 'tall_oat_grass'}, {'id': 18897, 'synset': 'toetoe.n.02', 'name': 'toetoe'}, {'id': 18898, 'synset': 'oat.n.01', 'name': 'oat'}, {'id': 18899, 'synset': 'cereal_oat.n.01', 'name': 'cereal_oat'}, {'id': 18900, 'synset': 'wild_oat.n.01', 'name': 'wild_oat'}, {'id': 18901, 'synset': 'slender_wild_oat.n.01', 'name': 'slender_wild_oat'}, {'id': 18902, 'synset': 'wild_red_oat.n.01', 'name': 'wild_red_oat'}, {'id': 18903, 'synset': 'brome.n.01', 'name': 'brome'}, {'id': 18904, 'synset': 'chess.n.01', 'name': 'chess'}, {'id': 18905, 'synset': 'field_brome.n.01', 'name': 'field_brome'}, {'id': 18906, 'synset': 'grama.n.01', 'name': 'grama'}, {'id': 18907, 'synset': 'black_grama.n.01', 'name': 'black_grama'}, {'id': 18908, 'synset': 'buffalo_grass.n.02', 'name': 'buffalo_grass'}, {'id': 18909, 'synset': 'reed_grass.n.01', 'name': 'reed_grass'}, {'id': 18910, 'synset': 'feather_reed_grass.n.01', 'name': 'feather_reed_grass'}, {'id': 18911, 'synset': 'australian_reed_grass.n.01', 'name': 
'Australian_reed_grass'}, {'id': 18912, 'synset': 'burgrass.n.01', 'name': 'burgrass'}, {'id': 18913, 'synset': 'buffel_grass.n.01', 'name': 'buffel_grass'}, {'id': 18914, 'synset': 'rhodes_grass.n.01', 'name': 'Rhodes_grass'}, {'id': 18915, 'synset': 'pampas_grass.n.01', 'name': 'pampas_grass'}, {'id': 18916, 'synset': 'giant_star_grass.n.01', 'name': 'giant_star_grass'}, {'id': 18917, 'synset': 'orchard_grass.n.01', 'name': 'orchard_grass'}, {'id': 18918, 'synset': 'egyptian_grass.n.01', 'name': 'Egyptian_grass'}, {'id': 18919, 'synset': 'crabgrass.n.01', 'name': 'crabgrass'}, {'id': 18920, 'synset': 'smooth_crabgrass.n.01', 'name': 'smooth_crabgrass'}, {'id': 18921, 'synset': 'large_crabgrass.n.01', 'name': 'large_crabgrass'}, {'id': 18922, 'synset': 'barnyard_grass.n.01', 'name': 'barnyard_grass'}, {'id': 18923, 'synset': 'japanese_millet.n.01', 'name': 'Japanese_millet'}, {'id': 18924, 'synset': 'yardgrass.n.01', 'name': 'yardgrass'}, {'id': 18925, 'synset': 'finger_millet.n.01', 'name': 'finger_millet'}, {'id': 18926, 'synset': 'lyme_grass.n.01', 'name': 'lyme_grass'}, {'id': 18927, 'synset': 'wild_rye.n.01', 'name': 'wild_rye'}, {'id': 18928, 'synset': 'giant_ryegrass.n.01', 'name': 'giant_ryegrass'}, {'id': 18929, 'synset': 'sea_lyme_grass.n.01', 'name': 'sea_lyme_grass'}, {'id': 18930, 'synset': 'canada_wild_rye.n.01', 'name': 'Canada_wild_rye'}, {'id': 18931, 'synset': 'teff.n.01', 'name': 'teff'}, {'id': 18932, 'synset': 'weeping_love_grass.n.01', 'name': 'weeping_love_grass'}, {'id': 18933, 'synset': 'plume_grass.n.01', 'name': 'plume_grass'}, {'id': 18934, 'synset': 'ravenna_grass.n.01', 'name': 'Ravenna_grass'}, {'id': 18935, 'synset': 'fescue.n.01', 'name': 'fescue'}, {'id': 18936, 'synset': 'reed_meadow_grass.n.01', 'name': 'reed_meadow_grass'}, {'id': 18937, 'synset': 'velvet_grass.n.01', 'name': 'velvet_grass'}, {'id': 18938, 'synset': 'creeping_soft_grass.n.01', 'name': 'creeping_soft_grass'}, {'id': 18939, 'synset': 'barleycorn.n.01', 'name': 'barleycorn'}, {'id': 18940, 'synset': 'barley_grass.n.01', 'name': 'barley_grass'}, {'id': 18941, 'synset': 'little_barley.n.01', 'name': 'little_barley'}, {'id': 18942, 'synset': 'rye_grass.n.01', 'name': 'rye_grass'}, {'id': 18943, 'synset': 'perennial_ryegrass.n.01', 'name': 'perennial_ryegrass'}, {'id': 18944, 'synset': 'italian_ryegrass.n.01', 'name': 'Italian_ryegrass'}, {'id': 18945, 'synset': 'darnel.n.01', 'name': 'darnel'}, {'id': 18946, 'synset': 'nimblewill.n.01', 'name': 'nimblewill'}, {'id': 18947, 'synset': 'cultivated_rice.n.01', 'name': 'cultivated_rice'}, {'id': 18948, 'synset': 'ricegrass.n.01', 'name': 'ricegrass'}, {'id': 18949, 'synset': 'smilo.n.01', 'name': 'smilo'}, {'id': 18950, 'synset': 'switch_grass.n.01', 'name': 'switch_grass'}, {'id': 18951, 'synset': 'broomcorn_millet.n.01', 'name': 'broomcorn_millet'}, {'id': 18952, 'synset': 'goose_grass.n.03', 'name': 'goose_grass'}, {'id': 18953, 'synset': 'dallisgrass.n.01', 'name': 'dallisgrass'}, {'id': 18954, 'synset': 'bahia_grass.n.01', 'name': 'Bahia_grass'}, {'id': 18955, 'synset': 'knotgrass.n.01', 'name': 'knotgrass'}, {'id': 18956, 'synset': 'fountain_grass.n.01', 'name': 'fountain_grass'}, {'id': 18957, 'synset': 'reed_canary_grass.n.01', 'name': 'reed_canary_grass'}, {'id': 18958, 'synset': 'canary_grass.n.01', 'name': 'canary_grass'}, {'id': 18959, 'synset': 'timothy.n.01', 'name': 'timothy'}, {'id': 18960, 'synset': 'bluegrass.n.01', 'name': 'bluegrass'}, {'id': 18961, 'synset': 'meadowgrass.n.01', 'name': 'meadowgrass'}, {'id': 18962, 'synset': 
'wood_meadowgrass.n.01', 'name': 'wood_meadowgrass'}, {'id': 18963, 'synset': 'noble_cane.n.01', 'name': 'noble_cane'}, {'id': 18964, 'synset': 'munj.n.01', 'name': 'munj'}, {'id': 18965, 'synset': 'broom_beard_grass.n.01', 'name': 'broom_beard_grass'}, {'id': 18966, 'synset': 'bluestem.n.01', 'name': 'bluestem'}, {'id': 18967, 'synset': 'rye.n.02', 'name': 'rye'}, {'id': 18968, 'synset': 'bristlegrass.n.01', 'name': 'bristlegrass'}, {'id': 18969, 'synset': 'giant_foxtail.n.01', 'name': 'giant_foxtail'}, {'id': 18970, 'synset': 'yellow_bristlegrass.n.01', 'name': 'yellow_bristlegrass'}, {'id': 18971, 'synset': 'green_bristlegrass.n.01', 'name': 'green_bristlegrass'}, {'id': 18972, 'synset': 'siberian_millet.n.01', 'name': 'Siberian_millet'}, {'id': 18973, 'synset': 'german_millet.n.01', 'name': 'German_millet'}, {'id': 18974, 'synset': 'millet.n.01', 'name': 'millet'}, {'id': 18975, 'synset': 'rattan.n.02', 'name': 'rattan'}, {'id': 18976, 'synset': 'malacca.n.01', 'name': 'malacca'}, {'id': 18977, 'synset': 'reed.n.01', 'name': 'reed'}, {'id': 18978, 'synset': 'sorghum.n.01', 'name': 'sorghum'}, {'id': 18979, 'synset': 'grain_sorghum.n.01', 'name': 'grain_sorghum'}, {'id': 18980, 'synset': 'durra.n.01', 'name': 'durra'}, {'id': 18981, 'synset': 'feterita.n.01', 'name': 'feterita'}, {'id': 18982, 'synset': 'hegari.n.01', 'name': 'hegari'}, {'id': 18983, 'synset': 'kaoliang.n.01', 'name': 'kaoliang'}, {'id': 18984, 'synset': 'milo.n.01', 'name': 'milo'}, {'id': 18985, 'synset': 'shallu.n.01', 'name': 'shallu'}, {'id': 18986, 'synset': 'broomcorn.n.01', 'name': 'broomcorn'}, {'id': 18987, 'synset': 'cordgrass.n.01', 'name': 'cordgrass'}, {'id': 18988, 'synset': 'salt_reed_grass.n.01', 'name': 'salt_reed_grass'}, {'id': 18989, 'synset': 'prairie_cordgrass.n.01', 'name': 'prairie_cordgrass'}, {'id': 18990, 'synset': 'smut_grass.n.01', 'name': 'smut_grass'}, {'id': 18991, 'synset': 'sand_dropseed.n.01', 'name': 'sand_dropseed'}, {'id': 18992, 'synset': 'rush_grass.n.01', 'name': 'rush_grass'}, {'id': 18993, 'synset': 'st._augustine_grass.n.01', 'name': 'St._Augustine_grass'}, {'id': 18994, 'synset': 'grain.n.08', 'name': 'grain'}, {'id': 18995, 'synset': 'cereal.n.01', 'name': 'cereal'}, {'id': 18996, 'synset': 'wheat.n.01', 'name': 'wheat'}, {'id': 18997, 'synset': 'wheat_berry.n.01', 'name': 'wheat_berry'}, {'id': 18998, 'synset': 'durum.n.01', 'name': 'durum'}, {'id': 18999, 'synset': 'spelt.n.01', 'name': 'spelt'}, {'id': 19000, 'synset': 'emmer.n.01', 'name': 'emmer'}, {'id': 19001, 'synset': 'wild_wheat.n.01', 'name': 'wild_wheat'}, {'id': 19002, 'synset': 'corn.n.01', 'name': 'corn'}, {'id': 19003, 'synset': 'mealie.n.01', 'name': 'mealie'}, {'id': 19004, 'synset': 'corn.n.02', 'name': 'corn'}, {'id': 19005, 'synset': 'dent_corn.n.01', 'name': 'dent_corn'}, {'id': 19006, 'synset': 'flint_corn.n.01', 'name': 'flint_corn'}, {'id': 19007, 'synset': 'popcorn.n.01', 'name': 'popcorn'}, {'id': 19008, 'synset': 'zoysia.n.01', 'name': 'zoysia'}, {'id': 19009, 'synset': 'manila_grass.n.01', 'name': 'Manila_grass'}, {'id': 19010, 'synset': 'korean_lawn_grass.n.01', 'name': 'Korean_lawn_grass'}, {'id': 19011, 'synset': 'common_bamboo.n.01', 'name': 'common_bamboo'}, {'id': 19012, 'synset': 'giant_bamboo.n.01', 'name': 'giant_bamboo'}, {'id': 19013, 'synset': 'umbrella_plant.n.03', 'name': 'umbrella_plant'}, {'id': 19014, 'synset': 'chufa.n.01', 'name': 'chufa'}, {'id': 19015, 'synset': 'galingale.n.01', 'name': 'galingale'}, {'id': 19016, 'synset': 'nutgrass.n.01', 'name': 'nutgrass'}, {'id': 
19017, 'synset': 'sand_sedge.n.01', 'name': 'sand_sedge'}, {'id': 19018, 'synset': 'cypress_sedge.n.01', 'name': 'cypress_sedge'}, {'id': 19019, 'synset': 'cotton_grass.n.01', 'name': 'cotton_grass'}, {'id': 19020, 'synset': 'common_cotton_grass.n.01', 'name': 'common_cotton_grass'}, {'id': 19021, 'synset': 'hardstem_bulrush.n.01', 'name': 'hardstem_bulrush'}, {'id': 19022, 'synset': 'wool_grass.n.01', 'name': 'wool_grass'}, {'id': 19023, 'synset': 'spike_rush.n.01', 'name': 'spike_rush'}, {'id': 19024, 'synset': 'water_chestnut.n.02', 'name': 'water_chestnut'}, {'id': 19025, 'synset': 'needle_spike_rush.n.01', 'name': 'needle_spike_rush'}, {'id': 19026, 'synset': 'creeping_spike_rush.n.01', 'name': 'creeping_spike_rush'}, {'id': 19027, 'synset': 'pandanus.n.02', 'name': 'pandanus'}, {'id': 19028, 'synset': 'textile_screw_pine.n.01', 'name': 'textile_screw_pine'}, {'id': 19029, 'synset': 'cattail.n.01', 'name': 'cattail'}, {'id': 19030, 'synset': "cat's-tail.n.01", 'name': "cat's-tail"}, {'id': 19031, 'synset': 'bur_reed.n.01', 'name': 'bur_reed'}, {'id': 19032, 'synset': 'grain.n.07', 'name': 'grain'}, {'id': 19033, 'synset': 'kernel.n.02', 'name': 'kernel'}, {'id': 19034, 'synset': 'rye.n.01', 'name': 'rye'}, {'id': 19035, 'synset': 'gourd.n.03', 'name': 'gourd'}, {'id': 19036, 'synset': 'pumpkin.n.01', 'name': 'pumpkin'}, {'id': 19037, 'synset': 'squash.n.01', 'name': 'squash'}, {'id': 19038, 'synset': 'summer_squash.n.01', 'name': 'summer_squash'}, {'id': 19039, 'synset': 'yellow_squash.n.01', 'name': 'yellow_squash'}, {'id': 19040, 'synset': 'marrow.n.02', 'name': 'marrow'}, {'id': 19041, 'synset': 'zucchini.n.01', 'name': 'zucchini'}, {'id': 19042, 'synset': 'cocozelle.n.01', 'name': 'cocozelle'}, {'id': 19043, 'synset': 'cymling.n.01', 'name': 'cymling'}, {'id': 19044, 'synset': 'spaghetti_squash.n.01', 'name': 'spaghetti_squash'}, {'id': 19045, 'synset': 'winter_squash.n.01', 'name': 'winter_squash'}, {'id': 19046, 'synset': 'acorn_squash.n.01', 'name': 'acorn_squash'}, {'id': 19047, 'synset': 'hubbard_squash.n.01', 'name': 'hubbard_squash'}, {'id': 19048, 'synset': 'turban_squash.n.01', 'name': 'turban_squash'}, {'id': 19049, 'synset': 'buttercup_squash.n.01', 'name': 'buttercup_squash'}, {'id': 19050, 'synset': 'butternut_squash.n.01', 'name': 'butternut_squash'}, {'id': 19051, 'synset': 'winter_crookneck.n.01', 'name': 'winter_crookneck'}, {'id': 19052, 'synset': 'cushaw.n.01', 'name': 'cushaw'}, {'id': 19053, 'synset': 'prairie_gourd.n.02', 'name': 'prairie_gourd'}, {'id': 19054, 'synset': 'prairie_gourd.n.01', 'name': 'prairie_gourd'}, {'id': 19055, 'synset': 'bryony.n.01', 'name': 'bryony'}, {'id': 19056, 'synset': 'white_bryony.n.01', 'name': 'white_bryony'}, {'id': 19057, 'synset': 'sweet_melon.n.01', 'name': 'sweet_melon'}, {'id': 19058, 'synset': 'cantaloupe.n.01', 'name': 'cantaloupe'}, {'id': 19059, 'synset': 'winter_melon.n.01', 'name': 'winter_melon'}, {'id': 19060, 'synset': 'net_melon.n.01', 'name': 'net_melon'}, {'id': 19061, 'synset': 'cucumber.n.01', 'name': 'cucumber'}, {'id': 19062, 'synset': 'squirting_cucumber.n.01', 'name': 'squirting_cucumber'}, {'id': 19063, 'synset': 'bottle_gourd.n.01', 'name': 'bottle_gourd'}, {'id': 19064, 'synset': 'luffa.n.02', 'name': 'luffa'}, {'id': 19065, 'synset': 'loofah.n.02', 'name': 'loofah'}, {'id': 19066, 'synset': 'angled_loofah.n.01', 'name': 'angled_loofah'}, {'id': 19067, 'synset': 'loofa.n.01', 'name': 'loofa'}, {'id': 19068, 'synset': 'balsam_apple.n.01', 'name': 'balsam_apple'}, {'id': 19069, 'synset': 
'balsam_pear.n.01', 'name': 'balsam_pear'}, {'id': 19070, 'synset': 'lobelia.n.01', 'name': 'lobelia'}, {'id': 19071, 'synset': 'water_lobelia.n.01', 'name': 'water_lobelia'}, {'id': 19072, 'synset': 'mallow.n.01', 'name': 'mallow'}, {'id': 19073, 'synset': 'musk_mallow.n.02', 'name': 'musk_mallow'}, {'id': 19074, 'synset': 'common_mallow.n.01', 'name': 'common_mallow'}, {'id': 19075, 'synset': 'okra.n.02', 'name': 'okra'}, {'id': 19076, 'synset': 'okra.n.01', 'name': 'okra'}, {'id': 19077, 'synset': 'abelmosk.n.01', 'name': 'abelmosk'}, {'id': 19078, 'synset': 'flowering_maple.n.01', 'name': 'flowering_maple'}, {'id': 19079, 'synset': 'velvetleaf.n.02', 'name': 'velvetleaf'}, {'id': 19080, 'synset': 'hollyhock.n.02', 'name': 'hollyhock'}, {'id': 19081, 'synset': 'rose_mallow.n.02', 'name': 'rose_mallow'}, {'id': 19082, 'synset': 'althea.n.01', 'name': 'althea'}, {'id': 19083, 'synset': 'marsh_mallow.n.01', 'name': 'marsh_mallow'}, {'id': 19084, 'synset': 'poppy_mallow.n.01', 'name': 'poppy_mallow'}, {'id': 19085, 'synset': 'fringed_poppy_mallow.n.01', 'name': 'fringed_poppy_mallow'}, {'id': 19086, 'synset': 'purple_poppy_mallow.n.01', 'name': 'purple_poppy_mallow'}, {'id': 19087, 'synset': 'clustered_poppy_mallow.n.01', 'name': 'clustered_poppy_mallow'}, {'id': 19088, 'synset': 'sea_island_cotton.n.01', 'name': 'sea_island_cotton'}, {'id': 19089, 'synset': 'levant_cotton.n.01', 'name': 'Levant_cotton'}, {'id': 19090, 'synset': 'upland_cotton.n.01', 'name': 'upland_cotton'}, {'id': 19091, 'synset': 'peruvian_cotton.n.01', 'name': 'Peruvian_cotton'}, {'id': 19092, 'synset': 'wild_cotton.n.01', 'name': 'wild_cotton'}, {'id': 19093, 'synset': 'kenaf.n.02', 'name': 'kenaf'}, {'id': 19094, 'synset': 'sorrel_tree.n.02', 'name': 'sorrel_tree'}, {'id': 19095, 'synset': 'rose_mallow.n.01', 'name': 'rose_mallow'}, {'id': 19096, 'synset': 'cotton_rose.n.01', 'name': 'cotton_rose'}, {'id': 19097, 'synset': 'roselle.n.01', 'name': 'roselle'}, {'id': 19098, 'synset': 'mahoe.n.01', 'name': 'mahoe'}, {'id': 19099, 'synset': 'flower-of-an-hour.n.01', 'name': 'flower-of-an-hour'}, {'id': 19100, 'synset': 'lacebark.n.01', 'name': 'lacebark'}, {'id': 19101, 'synset': 'wild_hollyhock.n.02', 'name': 'wild_hollyhock'}, {'id': 19102, 'synset': 'mountain_hollyhock.n.01', 'name': 'mountain_hollyhock'}, {'id': 19103, 'synset': 'seashore_mallow.n.01', 'name': 'seashore_mallow'}, {'id': 19104, 'synset': 'salt_marsh_mallow.n.01', 'name': 'salt_marsh_mallow'}, {'id': 19105, 'synset': 'chaparral_mallow.n.01', 'name': 'chaparral_mallow'}, {'id': 19106, 'synset': 'malope.n.01', 'name': 'malope'}, {'id': 19107, 'synset': 'false_mallow.n.02', 'name': 'false_mallow'}, {'id': 19108, 'synset': 'waxmallow.n.01', 'name': 'waxmallow'}, {'id': 19109, 'synset': 'glade_mallow.n.01', 'name': 'glade_mallow'}, {'id': 19110, 'synset': 'pavonia.n.01', 'name': 'pavonia'}, {'id': 19111, 'synset': 'ribbon_tree.n.01', 'name': 'ribbon_tree'}, {'id': 19112, 'synset': 'bush_hibiscus.n.01', 'name': 'bush_hibiscus'}, {'id': 19113, 'synset': 'virginia_mallow.n.01', 'name': 'Virginia_mallow'}, {'id': 19114, 'synset': 'queensland_hemp.n.01', 'name': 'Queensland_hemp'}, {'id': 19115, 'synset': 'indian_mallow.n.01', 'name': 'Indian_mallow'}, {'id': 19116, 'synset': 'checkerbloom.n.01', 'name': 'checkerbloom'}, {'id': 19117, 'synset': 'globe_mallow.n.01', 'name': 'globe_mallow'}, {'id': 19118, 'synset': 'prairie_mallow.n.01', 'name': 'prairie_mallow'}, {'id': 19119, 'synset': 'tulipwood_tree.n.01', 'name': 'tulipwood_tree'}, {'id': 19120, 'synset': 
'portia_tree.n.01', 'name': 'portia_tree'}, {'id': 19121, 'synset': 'red_silk-cotton_tree.n.01', 'name': 'red_silk-cotton_tree'}, {'id': 19122, 'synset': 'cream-of-tartar_tree.n.01', 'name': 'cream-of-tartar_tree'}, {'id': 19123, 'synset': 'baobab.n.01', 'name': 'baobab'}, {'id': 19124, 'synset': 'kapok.n.02', 'name': 'kapok'}, {'id': 19125, 'synset': 'durian.n.01', 'name': 'durian'}, {'id': 19126, 'synset': 'montezuma.n.01', 'name': 'Montezuma'}, {'id': 19127, 'synset': 'shaving-brush_tree.n.01', 'name': 'shaving-brush_tree'}, {'id': 19128, 'synset': 'quandong.n.03', 'name': 'quandong'}, {'id': 19129, 'synset': 'quandong.n.02', 'name': 'quandong'}, {'id': 19130, 'synset': 'makomako.n.01', 'name': 'makomako'}, {'id': 19131, 'synset': 'jamaican_cherry.n.01', 'name': 'Jamaican_cherry'}, {'id': 19132, 'synset': 'breakax.n.01', 'name': 'breakax'}, {'id': 19133, 'synset': 'sterculia.n.01', 'name': 'sterculia'}, {'id': 19134, 'synset': 'panama_tree.n.01', 'name': 'Panama_tree'}, {'id': 19135, 'synset': 'kalumpang.n.01', 'name': 'kalumpang'}, {'id': 19136, 'synset': 'bottle-tree.n.01', 'name': 'bottle-tree'}, {'id': 19137, 'synset': 'flame_tree.n.04', 'name': 'flame_tree'}, {'id': 19138, 'synset': 'flame_tree.n.03', 'name': 'flame_tree'}, {'id': 19139, 'synset': 'kurrajong.n.01', 'name': 'kurrajong'}, {'id': 19140, 'synset': 'queensland_bottletree.n.01', 'name': 'Queensland_bottletree'}, {'id': 19141, 'synset': 'kola.n.01', 'name': 'kola'}, {'id': 19142, 'synset': 'kola_nut.n.01', 'name': 'kola_nut'}, {'id': 19143, 'synset': 'chinese_parasol_tree.n.01', 'name': 'Chinese_parasol_tree'}, {'id': 19144, 'synset': 'flannelbush.n.01', 'name': 'flannelbush'}, {'id': 19145, 'synset': 'screw_tree.n.01', 'name': 'screw_tree'}, {'id': 19146, 'synset': 'nut-leaved_screw_tree.n.01', 'name': 'nut-leaved_screw_tree'}, {'id': 19147, 'synset': 'red_beech.n.02', 'name': 'red_beech'}, {'id': 19148, 'synset': 'looking_glass_tree.n.01', 'name': 'looking_glass_tree'}, {'id': 19149, 'synset': 'looking-glass_plant.n.01', 'name': 'looking-glass_plant'}, {'id': 19150, 'synset': 'honey_bell.n.01', 'name': 'honey_bell'}, {'id': 19151, 'synset': 'mayeng.n.01', 'name': 'mayeng'}, {'id': 19152, 'synset': 'silver_tree.n.02', 'name': 'silver_tree'}, {'id': 19153, 'synset': 'cacao.n.01', 'name': 'cacao'}, {'id': 19154, 'synset': 'obeche.n.02', 'name': 'obeche'}, {'id': 19155, 'synset': 'linden.n.02', 'name': 'linden'}, {'id': 19156, 'synset': 'american_basswood.n.01', 'name': 'American_basswood'}, {'id': 19157, 'synset': 'small-leaved_linden.n.01', 'name': 'small-leaved_linden'}, {'id': 19158, 'synset': 'white_basswood.n.01', 'name': 'white_basswood'}, {'id': 19159, 'synset': 'japanese_linden.n.01', 'name': 'Japanese_linden'}, {'id': 19160, 'synset': 'silver_lime.n.01', 'name': 'silver_lime'}, {'id': 19161, 'synset': 'corchorus.n.01', 'name': 'corchorus'}, {'id': 19162, 'synset': 'african_hemp.n.02', 'name': 'African_hemp'}, {'id': 19163, 'synset': 'herb.n.01', 'name': 'herb'}, {'id': 19164, 'synset': 'protea.n.01', 'name': 'protea'}, {'id': 19165, 'synset': 'honeypot.n.01', 'name': 'honeypot'}, {'id': 19166, 'synset': 'honeyflower.n.02', 'name': 'honeyflower'}, {'id': 19167, 'synset': 'banksia.n.01', 'name': 'banksia'}, {'id': 19168, 'synset': 'honeysuckle.n.02', 'name': 'honeysuckle'}, {'id': 19169, 'synset': 'smoke_bush.n.02', 'name': 'smoke_bush'}, {'id': 19170, 'synset': 'chilean_firebush.n.01', 'name': 'Chilean_firebush'}, {'id': 19171, 'synset': 'chilean_nut.n.01', 'name': 'Chilean_nut'}, {'id': 19172, 'synset': 
'grevillea.n.01', 'name': 'grevillea'}, {'id': 19173, 'synset': 'red-flowered_silky_oak.n.01', 'name': 'red-flowered_silky_oak'}, {'id': 19174, 'synset': 'silky_oak.n.01', 'name': 'silky_oak'}, {'id': 19175, 'synset': 'beefwood.n.05', 'name': 'beefwood'}, {'id': 19176, 'synset': 'cushion_flower.n.01', 'name': 'cushion_flower'}, {'id': 19177, 'synset': 'rewa-rewa.n.01', 'name': 'rewa-rewa'}, {'id': 19178, 'synset': 'honeyflower.n.01', 'name': 'honeyflower'}, {'id': 19179, 'synset': 'silver_tree.n.01', 'name': 'silver_tree'}, {'id': 19180, 'synset': 'lomatia.n.01', 'name': 'lomatia'}, {'id': 19181, 'synset': 'macadamia.n.01', 'name': 'macadamia'}, {'id': 19182, 'synset': 'macadamia_integrifolia.n.01', 'name': 'Macadamia_integrifolia'}, {'id': 19183, 'synset': 'macadamia_nut.n.01', 'name': 'macadamia_nut'}, {'id': 19184, 'synset': 'queensland_nut.n.01', 'name': 'Queensland_nut'}, {'id': 19185, 'synset': 'prickly_ash.n.02', 'name': 'prickly_ash'}, {'id': 19186, 'synset': 'geebung.n.01', 'name': 'geebung'}, {'id': 19187, 'synset': 'wheel_tree.n.01', 'name': 'wheel_tree'}, {'id': 19188, 'synset': 'scrub_beefwood.n.01', 'name': 'scrub_beefwood'}, {'id': 19189, 'synset': 'waratah.n.02', 'name': 'waratah'}, {'id': 19190, 'synset': 'waratah.n.01', 'name': 'waratah'}, {'id': 19191, 'synset': 'casuarina.n.01', 'name': 'casuarina'}, {'id': 19192, 'synset': 'she-oak.n.01', 'name': 'she-oak'}, {'id': 19193, 'synset': 'beefwood.n.03', 'name': 'beefwood'}, {'id': 19194, 'synset': 'australian_pine.n.01', 'name': 'Australian_pine'}, {'id': 19195, 'synset': 'heath.n.01', 'name': 'heath'}, {'id': 19196, 'synset': 'tree_heath.n.02', 'name': 'tree_heath'}, {'id': 19197, 'synset': 'briarroot.n.01', 'name': 'briarroot'}, {'id': 19198, 'synset': 'winter_heath.n.01', 'name': 'winter_heath'}, {'id': 19199, 'synset': 'bell_heather.n.02', 'name': 'bell_heather'}, {'id': 19200, 'synset': 'cornish_heath.n.01', 'name': 'Cornish_heath'}, {'id': 19201, 'synset': 'spanish_heath.n.01', 'name': 'Spanish_heath'}, {'id': 19202, 'synset': "prince-of-wales'-heath.n.01", 'name': "Prince-of-Wales'-heath"}, {'id': 19203, 'synset': 'bog_rosemary.n.01', 'name': 'bog_rosemary'}, {'id': 19204, 'synset': 'marsh_andromeda.n.01', 'name': 'marsh_andromeda'}, {'id': 19205, 'synset': 'madrona.n.01', 'name': 'madrona'}, {'id': 19206, 'synset': 'strawberry_tree.n.01', 'name': 'strawberry_tree'}, {'id': 19207, 'synset': 'bearberry.n.03', 'name': 'bearberry'}, {'id': 19208, 'synset': 'alpine_bearberry.n.01', 'name': 'alpine_bearberry'}, {'id': 19209, 'synset': 'heartleaf_manzanita.n.01', 'name': 'heartleaf_manzanita'}, {'id': 19210, 'synset': 'parry_manzanita.n.01', 'name': 'Parry_manzanita'}, {'id': 19211, 'synset': 'spike_heath.n.01', 'name': 'spike_heath'}, {'id': 19212, 'synset': 'bryanthus.n.01', 'name': 'bryanthus'}, {'id': 19213, 'synset': 'leatherleaf.n.02', 'name': 'leatherleaf'}, {'id': 19214, 'synset': 'connemara_heath.n.01', 'name': 'Connemara_heath'}, {'id': 19215, 'synset': 'trailing_arbutus.n.01', 'name': 'trailing_arbutus'}, {'id': 19216, 'synset': 'creeping_snowberry.n.01', 'name': 'creeping_snowberry'}, {'id': 19217, 'synset': 'salal.n.01', 'name': 'salal'}, {'id': 19218, 'synset': 'huckleberry.n.02', 'name': 'huckleberry'}, {'id': 19219, 'synset': 'black_huckleberry.n.01', 'name': 'black_huckleberry'}, {'id': 19220, 'synset': 'dangleberry.n.01', 'name': 'dangleberry'}, {'id': 19221, 'synset': 'box_huckleberry.n.01', 'name': 'box_huckleberry'}, {'id': 19222, 'synset': 'kalmia.n.01', 'name': 'kalmia'}, {'id': 19223, 'synset': 
'mountain_laurel.n.01', 'name': 'mountain_laurel'}, {'id': 19224, 'synset': 'swamp_laurel.n.01', 'name': 'swamp_laurel'}, {'id': 19225, 'synset': "trapper's_tea.n.01", 'name': "trapper's_tea"}, {'id': 19226, 'synset': 'wild_rosemary.n.01', 'name': 'wild_rosemary'}, {'id': 19227, 'synset': 'sand_myrtle.n.01', 'name': 'sand_myrtle'}, {'id': 19228, 'synset': 'leucothoe.n.01', 'name': 'leucothoe'}, {'id': 19229, 'synset': 'dog_laurel.n.01', 'name': 'dog_laurel'}, {'id': 19230, 'synset': 'sweet_bells.n.01', 'name': 'sweet_bells'}, {'id': 19231, 'synset': 'alpine_azalea.n.01', 'name': 'alpine_azalea'}, {'id': 19232, 'synset': 'staggerbush.n.01', 'name': 'staggerbush'}, {'id': 19233, 'synset': 'maleberry.n.01', 'name': 'maleberry'}, {'id': 19234, 'synset': 'fetterbush.n.02', 'name': 'fetterbush'}, {'id': 19235, 'synset': 'false_azalea.n.01', 'name': 'false_azalea'}, {'id': 19236, 'synset': 'minniebush.n.01', 'name': 'minniebush'}, {'id': 19237, 'synset': 'sorrel_tree.n.01', 'name': 'sorrel_tree'}, {'id': 19238, 'synset': 'mountain_heath.n.01', 'name': 'mountain_heath'}, {'id': 19239, 'synset': 'purple_heather.n.01', 'name': 'purple_heather'}, {'id': 19240, 'synset': 'fetterbush.n.01', 'name': 'fetterbush'}, {'id': 19241, 'synset': 'rhododendron.n.01', 'name': 'rhododendron'}, {'id': 19242, 'synset': 'coast_rhododendron.n.01', 'name': 'coast_rhododendron'}, {'id': 19243, 'synset': 'rosebay.n.01', 'name': 'rosebay'}, {'id': 19244, 'synset': 'swamp_azalea.n.01', 'name': 'swamp_azalea'}, {'id': 19245, 'synset': 'azalea.n.01', 'name': 'azalea'}, {'id': 19246, 'synset': 'cranberry.n.01', 'name': 'cranberry'}, {'id': 19247, 'synset': 'american_cranberry.n.01', 'name': 'American_cranberry'}, {'id': 19248, 'synset': 'european_cranberry.n.01', 'name': 'European_cranberry'}, {'id': 19249, 'synset': 'blueberry.n.01', 'name': 'blueberry'}, {'id': 19250, 'synset': 'farkleberry.n.01', 'name': 'farkleberry'}, {'id': 19251, 'synset': 'low-bush_blueberry.n.01', 'name': 'low-bush_blueberry'}, {'id': 19252, 'synset': 'rabbiteye_blueberry.n.01', 'name': 'rabbiteye_blueberry'}, {'id': 19253, 'synset': 'dwarf_bilberry.n.01', 'name': 'dwarf_bilberry'}, {'id': 19254, 'synset': 'evergreen_blueberry.n.01', 'name': 'evergreen_blueberry'}, {'id': 19255, 'synset': 'evergreen_huckleberry.n.01', 'name': 'evergreen_huckleberry'}, {'id': 19256, 'synset': 'bilberry.n.02', 'name': 'bilberry'}, {'id': 19257, 'synset': 'bilberry.n.01', 'name': 'bilberry'}, {'id': 19258, 'synset': 'bog_bilberry.n.01', 'name': 'bog_bilberry'}, {'id': 19259, 'synset': 'dryland_blueberry.n.01', 'name': 'dryland_blueberry'}, {'id': 19260, 'synset': 'grouseberry.n.01', 'name': 'grouseberry'}, {'id': 19261, 'synset': 'deerberry.n.01', 'name': 'deerberry'}, {'id': 19262, 'synset': 'cowberry.n.01', 'name': 'cowberry'}, {'id': 19263, 'synset': 'diapensia.n.01', 'name': 'diapensia'}, {'id': 19264, 'synset': 'galax.n.01', 'name': 'galax'}, {'id': 19265, 'synset': 'pyxie.n.01', 'name': 'pyxie'}, {'id': 19266, 'synset': 'shortia.n.01', 'name': 'shortia'}, {'id': 19267, 'synset': 'oconee_bells.n.01', 'name': 'oconee_bells'}, {'id': 19268, 'synset': 'australian_heath.n.01', 'name': 'Australian_heath'}, {'id': 19269, 'synset': 'epacris.n.01', 'name': 'epacris'}, {'id': 19270, 'synset': 'common_heath.n.02', 'name': 'common_heath'}, {'id': 19271, 'synset': 'common_heath.n.01', 'name': 'common_heath'}, {'id': 19272, 'synset': 'port_jackson_heath.n.01', 'name': 'Port_Jackson_heath'}, {'id': 19273, 'synset': 'native_cranberry.n.01', 'name': 'native_cranberry'}, {'id': 
19274, 'synset': 'pink_fivecorner.n.01', 'name': 'pink_fivecorner'}, {'id': 19275, 'synset': 'wintergreen.n.01', 'name': 'wintergreen'}, {'id': 19276, 'synset': 'false_wintergreen.n.01', 'name': 'false_wintergreen'}, {'id': 19277, 'synset': 'lesser_wintergreen.n.01', 'name': 'lesser_wintergreen'}, {'id': 19278, 'synset': 'wild_lily_of_the_valley.n.02', 'name': 'wild_lily_of_the_valley'}, {'id': 19279, 'synset': 'wild_lily_of_the_valley.n.01', 'name': 'wild_lily_of_the_valley'}, {'id': 19280, 'synset': 'pipsissewa.n.01', 'name': 'pipsissewa'}, {'id': 19281, 'synset': 'love-in-winter.n.01', 'name': 'love-in-winter'}, {'id': 19282, 'synset': 'one-flowered_wintergreen.n.01', 'name': 'one-flowered_wintergreen'}, {'id': 19283, 'synset': 'indian_pipe.n.01', 'name': 'Indian_pipe'}, {'id': 19284, 'synset': 'pinesap.n.01', 'name': 'pinesap'}, {'id': 19285, 'synset': 'beech.n.01', 'name': 'beech'}, {'id': 19286, 'synset': 'common_beech.n.01', 'name': 'common_beech'}, {'id': 19287, 'synset': 'copper_beech.n.01', 'name': 'copper_beech'}, {'id': 19288, 'synset': 'american_beech.n.01', 'name': 'American_beech'}, {'id': 19289, 'synset': 'weeping_beech.n.01', 'name': 'weeping_beech'}, {'id': 19290, 'synset': 'japanese_beech.n.01', 'name': 'Japanese_beech'}, {'id': 19291, 'synset': 'chestnut.n.02', 'name': 'chestnut'}, {'id': 19292, 'synset': 'american_chestnut.n.01', 'name': 'American_chestnut'}, {'id': 19293, 'synset': 'european_chestnut.n.01', 'name': 'European_chestnut'}, {'id': 19294, 'synset': 'chinese_chestnut.n.01', 'name': 'Chinese_chestnut'}, {'id': 19295, 'synset': 'japanese_chestnut.n.01', 'name': 'Japanese_chestnut'}, {'id': 19296, 'synset': 'allegheny_chinkapin.n.01', 'name': 'Allegheny_chinkapin'}, {'id': 19297, 'synset': 'ozark_chinkapin.n.01', 'name': 'Ozark_chinkapin'}, {'id': 19298, 'synset': 'oak_chestnut.n.01', 'name': 'oak_chestnut'}, {'id': 19299, 'synset': 'giant_chinkapin.n.01', 'name': 'giant_chinkapin'}, {'id': 19300, 'synset': 'dwarf_golden_chinkapin.n.01', 'name': 'dwarf_golden_chinkapin'}, {'id': 19301, 'synset': 'tanbark_oak.n.01', 'name': 'tanbark_oak'}, {'id': 19302, 'synset': 'japanese_oak.n.02', 'name': 'Japanese_oak'}, {'id': 19303, 'synset': 'southern_beech.n.01', 'name': 'southern_beech'}, {'id': 19304, 'synset': 'myrtle_beech.n.01', 'name': 'myrtle_beech'}, {'id': 19305, 'synset': 'coigue.n.01', 'name': 'Coigue'}, {'id': 19306, 'synset': 'new_zealand_beech.n.01', 'name': 'New_Zealand_beech'}, {'id': 19307, 'synset': 'silver_beech.n.01', 'name': 'silver_beech'}, {'id': 19308, 'synset': 'roble_beech.n.01', 'name': 'roble_beech'}, {'id': 19309, 'synset': 'rauli_beech.n.01', 'name': 'rauli_beech'}, {'id': 19310, 'synset': 'black_beech.n.01', 'name': 'black_beech'}, {'id': 19311, 'synset': 'hard_beech.n.01', 'name': 'hard_beech'}, {'id': 19312, 'synset': 'acorn.n.01', 'name': 'acorn'}, {'id': 19313, 'synset': 'cupule.n.01', 'name': 'cupule'}, {'id': 19314, 'synset': 'oak.n.02', 'name': 'oak'}, {'id': 19315, 'synset': 'live_oak.n.01', 'name': 'live_oak'}, {'id': 19316, 'synset': 'coast_live_oak.n.01', 'name': 'coast_live_oak'}, {'id': 19317, 'synset': 'white_oak.n.01', 'name': 'white_oak'}, {'id': 19318, 'synset': 'american_white_oak.n.01', 'name': 'American_white_oak'}, {'id': 19319, 'synset': 'arizona_white_oak.n.01', 'name': 'Arizona_white_oak'}, {'id': 19320, 'synset': 'swamp_white_oak.n.01', 'name': 'swamp_white_oak'}, {'id': 19321, 'synset': 'european_turkey_oak.n.01', 'name': 'European_turkey_oak'}, {'id': 19322, 'synset': 'canyon_oak.n.01', 'name': 'canyon_oak'}, 
{'id': 19323, 'synset': 'scarlet_oak.n.01', 'name': 'scarlet_oak'}, {'id': 19324, 'synset': 'jack_oak.n.02', 'name': 'jack_oak'}, {'id': 19325, 'synset': 'red_oak.n.01', 'name': 'red_oak'}, {'id': 19326, 'synset': 'southern_red_oak.n.01', 'name': 'southern_red_oak'}, {'id': 19327, 'synset': 'oregon_white_oak.n.01', 'name': 'Oregon_white_oak'}, {'id': 19328, 'synset': 'holm_oak.n.02', 'name': 'holm_oak'}, {'id': 19329, 'synset': 'bear_oak.n.01', 'name': 'bear_oak'}, {'id': 19330, 'synset': 'shingle_oak.n.01', 'name': 'shingle_oak'}, {'id': 19331, 'synset': 'bluejack_oak.n.01', 'name': 'bluejack_oak'}, {'id': 19332, 'synset': 'california_black_oak.n.01', 'name': 'California_black_oak'}, {'id': 19333, 'synset': 'american_turkey_oak.n.01', 'name': 'American_turkey_oak'}, {'id': 19334, 'synset': 'laurel_oak.n.01', 'name': 'laurel_oak'}, {'id': 19335, 'synset': 'california_white_oak.n.01', 'name': 'California_white_oak'}, {'id': 19336, 'synset': 'overcup_oak.n.01', 'name': 'overcup_oak'}, {'id': 19337, 'synset': 'bur_oak.n.01', 'name': 'bur_oak'}, {'id': 19338, 'synset': 'scrub_oak.n.01', 'name': 'scrub_oak'}, {'id': 19339, 'synset': 'blackjack_oak.n.01', 'name': 'blackjack_oak'}, {'id': 19340, 'synset': 'swamp_chestnut_oak.n.01', 'name': 'swamp_chestnut_oak'}, {'id': 19341, 'synset': 'japanese_oak.n.01', 'name': 'Japanese_oak'}, {'id': 19342, 'synset': 'chestnut_oak.n.01', 'name': 'chestnut_oak'}, {'id': 19343, 'synset': 'chinquapin_oak.n.01', 'name': 'chinquapin_oak'}, {'id': 19344, 'synset': 'myrtle_oak.n.01', 'name': 'myrtle_oak'}, {'id': 19345, 'synset': 'water_oak.n.01', 'name': 'water_oak'}, {'id': 19346, 'synset': 'nuttall_oak.n.01', 'name': 'Nuttall_oak'}, {'id': 19347, 'synset': 'durmast.n.01', 'name': 'durmast'}, {'id': 19348, 'synset': 'basket_oak.n.01', 'name': 'basket_oak'}, {'id': 19349, 'synset': 'pin_oak.n.01', 'name': 'pin_oak'}, {'id': 19350, 'synset': 'willow_oak.n.01', 'name': 'willow_oak'}, {'id': 19351, 'synset': 'dwarf_chinkapin_oak.n.01', 'name': 'dwarf_chinkapin_oak'}, {'id': 19352, 'synset': 'common_oak.n.01', 'name': 'common_oak'}, {'id': 19353, 'synset': 'northern_red_oak.n.01', 'name': 'northern_red_oak'}, {'id': 19354, 'synset': 'shumard_oak.n.01', 'name': 'Shumard_oak'}, {'id': 19355, 'synset': 'post_oak.n.01', 'name': 'post_oak'}, {'id': 19356, 'synset': 'cork_oak.n.01', 'name': 'cork_oak'}, {'id': 19357, 'synset': 'spanish_oak.n.01', 'name': 'Spanish_oak'}, {'id': 19358, 'synset': 'huckleberry_oak.n.01', 'name': 'huckleberry_oak'}, {'id': 19359, 'synset': 'chinese_cork_oak.n.01', 'name': 'Chinese_cork_oak'}, {'id': 19360, 'synset': 'black_oak.n.01', 'name': 'black_oak'}, {'id': 19361, 'synset': 'southern_live_oak.n.01', 'name': 'southern_live_oak'}, {'id': 19362, 'synset': 'interior_live_oak.n.01', 'name': 'interior_live_oak'}, {'id': 19363, 'synset': 'mast.n.02', 'name': 'mast'}, {'id': 19364, 'synset': 'birch.n.02', 'name': 'birch'}, {'id': 19365, 'synset': 'yellow_birch.n.01', 'name': 'yellow_birch'}, {'id': 19366, 'synset': 'american_white_birch.n.01', 'name': 'American_white_birch'}, {'id': 19367, 'synset': 'grey_birch.n.01', 'name': 'grey_birch'}, {'id': 19368, 'synset': 'silver_birch.n.01', 'name': 'silver_birch'}, {'id': 19369, 'synset': 'downy_birch.n.01', 'name': 'downy_birch'}, {'id': 19370, 'synset': 'black_birch.n.02', 'name': 'black_birch'}, {'id': 19371, 'synset': 'sweet_birch.n.01', 'name': 'sweet_birch'}, {'id': 19372, 'synset': 'yukon_white_birch.n.01', 'name': 'Yukon_white_birch'}, {'id': 19373, 'synset': 'swamp_birch.n.01', 'name': 
'swamp_birch'}, {'id': 19374, 'synset': 'newfoundland_dwarf_birch.n.01', 'name': 'Newfoundland_dwarf_birch'}, {'id': 19375, 'synset': 'alder.n.02', 'name': 'alder'}, {'id': 19376, 'synset': 'common_alder.n.01', 'name': 'common_alder'}, {'id': 19377, 'synset': 'grey_alder.n.01', 'name': 'grey_alder'}, {'id': 19378, 'synset': 'seaside_alder.n.01', 'name': 'seaside_alder'}, {'id': 19379, 'synset': 'white_alder.n.01', 'name': 'white_alder'}, {'id': 19380, 'synset': 'red_alder.n.01', 'name': 'red_alder'}, {'id': 19381, 'synset': 'speckled_alder.n.01', 'name': 'speckled_alder'}, {'id': 19382, 'synset': 'smooth_alder.n.01', 'name': 'smooth_alder'}, {'id': 19383, 'synset': 'green_alder.n.02', 'name': 'green_alder'}, {'id': 19384, 'synset': 'green_alder.n.01', 'name': 'green_alder'}, {'id': 19385, 'synset': 'hornbeam.n.01', 'name': 'hornbeam'}, {'id': 19386, 'synset': 'european_hornbeam.n.01', 'name': 'European_hornbeam'}, {'id': 19387, 'synset': 'american_hornbeam.n.01', 'name': 'American_hornbeam'}, {'id': 19388, 'synset': 'hop_hornbeam.n.01', 'name': 'hop_hornbeam'}, {'id': 19389, 'synset': 'old_world_hop_hornbeam.n.01', 'name': 'Old_World_hop_hornbeam'}, {'id': 19390, 'synset': 'eastern_hop_hornbeam.n.01', 'name': 'Eastern_hop_hornbeam'}, {'id': 19391, 'synset': 'hazelnut.n.01', 'name': 'hazelnut'}, {'id': 19392, 'synset': 'american_hazel.n.01', 'name': 'American_hazel'}, {'id': 19393, 'synset': 'cobnut.n.01', 'name': 'cobnut'}, {'id': 19394, 'synset': 'beaked_hazelnut.n.01', 'name': 'beaked_hazelnut'}, {'id': 19395, 'synset': 'centaury.n.01', 'name': 'centaury'}, {'id': 19396, 'synset': 'rosita.n.01', 'name': 'rosita'}, {'id': 19397, 'synset': 'lesser_centaury.n.01', 'name': 'lesser_centaury'}, {'id': 19398, 'synset': 'seaside_centaury.n.01', 'name': 'seaside_centaury'}, {'id': 19399, 'synset': 'slender_centaury.n.01', 'name': 'slender_centaury'}, {'id': 19400, 'synset': 'prairie_gentian.n.01', 'name': 'prairie_gentian'}, {'id': 19401, 'synset': 'persian_violet.n.01', 'name': 'Persian_violet'}, {'id': 19402, 'synset': 'columbo.n.01', 'name': 'columbo'}, {'id': 19403, 'synset': 'gentian.n.01', 'name': 'gentian'}, {'id': 19404, 'synset': 'gentianella.n.02', 'name': 'gentianella'}, {'id': 19405, 'synset': 'closed_gentian.n.02', 'name': 'closed_gentian'}, {'id': 19406, 'synset': "explorer's_gentian.n.01", 'name': "explorer's_gentian"}, {'id': 19407, 'synset': 'closed_gentian.n.01', 'name': 'closed_gentian'}, {'id': 19408, 'synset': 'great_yellow_gentian.n.01', 'name': 'great_yellow_gentian'}, {'id': 19409, 'synset': 'marsh_gentian.n.01', 'name': 'marsh_gentian'}, {'id': 19410, 'synset': 'soapwort_gentian.n.01', 'name': 'soapwort_gentian'}, {'id': 19411, 'synset': 'striped_gentian.n.01', 'name': 'striped_gentian'}, {'id': 19412, 'synset': 'agueweed.n.01', 'name': 'agueweed'}, {'id': 19413, 'synset': 'felwort.n.01', 'name': 'felwort'}, {'id': 19414, 'synset': 'fringed_gentian.n.01', 'name': 'fringed_gentian'}, {'id': 19415, 'synset': 'gentianopsis_crinita.n.01', 'name': 'Gentianopsis_crinita'}, {'id': 19416, 'synset': 'gentianopsis_detonsa.n.01', 'name': 'Gentianopsis_detonsa'}, {'id': 19417, 'synset': 'gentianopsid_procera.n.01', 'name': 'Gentianopsid_procera'}, {'id': 19418, 'synset': 'gentianopsis_thermalis.n.01', 'name': 'Gentianopsis_thermalis'}, {'id': 19419, 'synset': 'tufted_gentian.n.01', 'name': 'tufted_gentian'}, {'id': 19420, 'synset': 'spurred_gentian.n.01', 'name': 'spurred_gentian'}, {'id': 19421, 'synset': 'sabbatia.n.01', 'name': 'sabbatia'}, {'id': 19422, 'synset': 
'toothbrush_tree.n.01', 'name': 'toothbrush_tree'}, {'id': 19423, 'synset': 'olive_tree.n.01', 'name': 'olive_tree'}, {'id': 19424, 'synset': 'olive.n.02', 'name': 'olive'}, {'id': 19425, 'synset': 'olive.n.01', 'name': 'olive'}, {'id': 19426, 'synset': 'black_maire.n.01', 'name': 'black_maire'}, {'id': 19427, 'synset': 'white_maire.n.01', 'name': 'white_maire'}, {'id': 19428, 'synset': 'fringe_tree.n.01', 'name': 'fringe_tree'}, {'id': 19429, 'synset': 'fringe_bush.n.01', 'name': 'fringe_bush'}, {'id': 19430, 'synset': 'forestiera.n.01', 'name': 'forestiera'}, {'id': 19431, 'synset': 'forsythia.n.01', 'name': 'forsythia'}, {'id': 19432, 'synset': 'ash.n.02', 'name': 'ash'}, {'id': 19433, 'synset': 'white_ash.n.02', 'name': 'white_ash'}, {'id': 19434, 'synset': 'swamp_ash.n.01', 'name': 'swamp_ash'}, {'id': 19435, 'synset': 'flowering_ash.n.03', 'name': 'flowering_ash'}, {'id': 19436, 'synset': 'european_ash.n.01', 'name': 'European_ash'}, {'id': 19437, 'synset': 'oregon_ash.n.01', 'name': 'Oregon_ash'}, {'id': 19438, 'synset': 'black_ash.n.01', 'name': 'black_ash'}, {'id': 19439, 'synset': 'manna_ash.n.01', 'name': 'manna_ash'}, {'id': 19440, 'synset': 'red_ash.n.01', 'name': 'red_ash'}, {'id': 19441, 'synset': 'green_ash.n.01', 'name': 'green_ash'}, {'id': 19442, 'synset': 'blue_ash.n.01', 'name': 'blue_ash'}, {'id': 19443, 'synset': 'mountain_ash.n.03', 'name': 'mountain_ash'}, {'id': 19444, 'synset': 'pumpkin_ash.n.01', 'name': 'pumpkin_ash'}, {'id': 19445, 'synset': 'arizona_ash.n.01', 'name': 'Arizona_ash'}, {'id': 19446, 'synset': 'jasmine.n.01', 'name': 'jasmine'}, {'id': 19447, 'synset': 'primrose_jasmine.n.01', 'name': 'primrose_jasmine'}, {'id': 19448, 'synset': 'winter_jasmine.n.01', 'name': 'winter_jasmine'}, {'id': 19449, 'synset': 'common_jasmine.n.01', 'name': 'common_jasmine'}, {'id': 19450, 'synset': 'privet.n.01', 'name': 'privet'}, {'id': 19451, 'synset': 'amur_privet.n.01', 'name': 'Amur_privet'}, {'id': 19452, 'synset': 'japanese_privet.n.01', 'name': 'Japanese_privet'}, {'id': 19453, 'synset': 'ligustrum_obtusifolium.n.01', 'name': 'Ligustrum_obtusifolium'}, {'id': 19454, 'synset': 'common_privet.n.01', 'name': 'common_privet'}, {'id': 19455, 'synset': 'devilwood.n.01', 'name': 'devilwood'}, {'id': 19456, 'synset': 'mock_privet.n.01', 'name': 'mock_privet'}, {'id': 19457, 'synset': 'lilac.n.01', 'name': 'lilac'}, {'id': 19458, 'synset': 'himalayan_lilac.n.01', 'name': 'Himalayan_lilac'}, {'id': 19459, 'synset': 'persian_lilac.n.02', 'name': 'Persian_lilac'}, {'id': 19460, 'synset': 'japanese_tree_lilac.n.01', 'name': 'Japanese_tree_lilac'}, {'id': 19461, 'synset': 'japanese_lilac.n.01', 'name': 'Japanese_lilac'}, {'id': 19462, 'synset': 'common_lilac.n.01', 'name': 'common_lilac'}, {'id': 19463, 'synset': 'bloodwort.n.01', 'name': 'bloodwort'}, {'id': 19464, 'synset': 'kangaroo_paw.n.01', 'name': 'kangaroo_paw'}, {'id': 19465, 'synset': 'virginian_witch_hazel.n.01', 'name': 'Virginian_witch_hazel'}, {'id': 19466, 'synset': 'vernal_witch_hazel.n.01', 'name': 'vernal_witch_hazel'}, {'id': 19467, 'synset': 'winter_hazel.n.01', 'name': 'winter_hazel'}, {'id': 19468, 'synset': 'fothergilla.n.01', 'name': 'fothergilla'}, {'id': 19469, 'synset': 'liquidambar.n.02', 'name': 'liquidambar'}, {'id': 19470, 'synset': 'sweet_gum.n.03', 'name': 'sweet_gum'}, {'id': 19471, 'synset': 'iron_tree.n.01', 'name': 'iron_tree'}, {'id': 19472, 'synset': 'walnut.n.03', 'name': 'walnut'}, {'id': 19473, 'synset': 'california_black_walnut.n.01', 'name': 'California_black_walnut'}, {'id': 
19474, 'synset': 'butternut.n.01', 'name': 'butternut'}, {'id': 19475, 'synset': 'black_walnut.n.01', 'name': 'black_walnut'}, {'id': 19476, 'synset': 'english_walnut.n.01', 'name': 'English_walnut'}, {'id': 19477, 'synset': 'hickory.n.02', 'name': 'hickory'}, {'id': 19478, 'synset': 'water_hickory.n.01', 'name': 'water_hickory'}, {'id': 19479, 'synset': 'pignut.n.01', 'name': 'pignut'}, {'id': 19480, 'synset': 'bitternut.n.01', 'name': 'bitternut'}, {'id': 19481, 'synset': 'pecan.n.02', 'name': 'pecan'}, {'id': 19482, 'synset': 'big_shellbark.n.01', 'name': 'big_shellbark'}, {'id': 19483, 'synset': 'nutmeg_hickory.n.01', 'name': 'nutmeg_hickory'}, {'id': 19484, 'synset': 'shagbark.n.01', 'name': 'shagbark'}, {'id': 19485, 'synset': 'mockernut.n.01', 'name': 'mockernut'}, {'id': 19486, 'synset': 'wing_nut.n.01', 'name': 'wing_nut'}, {'id': 19487, 'synset': 'caucasian_walnut.n.01', 'name': 'Caucasian_walnut'}, {'id': 19488, 'synset': 'dhawa.n.01', 'name': 'dhawa'}, {'id': 19489, 'synset': 'combretum.n.01', 'name': 'combretum'}, {'id': 19490, 'synset': 'hiccup_nut.n.01', 'name': 'hiccup_nut'}, {'id': 19491, 'synset': 'bush_willow.n.02', 'name': 'bush_willow'}, {'id': 19492, 'synset': 'bush_willow.n.01', 'name': 'bush_willow'}, {'id': 19493, 'synset': 'button_tree.n.01', 'name': 'button_tree'}, {'id': 19494, 'synset': 'white_mangrove.n.02', 'name': 'white_mangrove'}, {'id': 19495, 'synset': 'oleaster.n.01', 'name': 'oleaster'}, {'id': 19496, 'synset': 'water_milfoil.n.01', 'name': 'water_milfoil'}, {'id': 19497, 'synset': 'anchovy_pear.n.01', 'name': 'anchovy_pear'}, {'id': 19498, 'synset': 'brazil_nut.n.01', 'name': 'brazil_nut'}, {'id': 19499, 'synset': 'loosestrife.n.01', 'name': 'loosestrife'}, {'id': 19500, 'synset': 'purple_loosestrife.n.01', 'name': 'purple_loosestrife'}, {'id': 19501, 'synset': 'grass_poly.n.01', 'name': 'grass_poly'}, {'id': 19502, 'synset': 'crape_myrtle.n.01', 'name': 'crape_myrtle'}, {'id': 19503, 'synset': "queen's_crape_myrtle.n.01", 'name': "Queen's_crape_myrtle"}, {'id': 19504, 'synset': 'myrtaceous_tree.n.01', 'name': 'myrtaceous_tree'}, {'id': 19505, 'synset': 'myrtle.n.02', 'name': 'myrtle'}, {'id': 19506, 'synset': 'common_myrtle.n.01', 'name': 'common_myrtle'}, {'id': 19507, 'synset': 'bayberry.n.01', 'name': 'bayberry'}, {'id': 19508, 'synset': 'allspice.n.01', 'name': 'allspice'}, {'id': 19509, 'synset': 'allspice_tree.n.01', 'name': 'allspice_tree'}, {'id': 19510, 'synset': 'sour_cherry.n.02', 'name': 'sour_cherry'}, {'id': 19511, 'synset': 'nakedwood.n.02', 'name': 'nakedwood'}, {'id': 19512, 'synset': 'surinam_cherry.n.02', 'name': 'Surinam_cherry'}, {'id': 19513, 'synset': 'rose_apple.n.01', 'name': 'rose_apple'}, {'id': 19514, 'synset': 'feijoa.n.01', 'name': 'feijoa'}, {'id': 19515, 'synset': 'jaboticaba.n.01', 'name': 'jaboticaba'}, {'id': 19516, 'synset': 'guava.n.02', 'name': 'guava'}, {'id': 19517, 'synset': 'guava.n.01', 'name': 'guava'}, {'id': 19518, 'synset': 'cattley_guava.n.01', 'name': 'cattley_guava'}, {'id': 19519, 'synset': 'brazilian_guava.n.01', 'name': 'Brazilian_guava'}, {'id': 19520, 'synset': 'gum_tree.n.01', 'name': 'gum_tree'}, {'id': 19521, 'synset': 'eucalyptus.n.02', 'name': 'eucalyptus'}, {'id': 19522, 'synset': 'flooded_gum.n.01', 'name': 'flooded_gum'}, {'id': 19523, 'synset': 'mallee.n.01', 'name': 'mallee'}, {'id': 19524, 'synset': 'stringybark.n.01', 'name': 'stringybark'}, {'id': 19525, 'synset': 'smoothbark.n.01', 'name': 'smoothbark'}, {'id': 19526, 'synset': 'red_gum.n.03', 'name': 'red_gum'}, {'id': 19527, 
'synset': 'red_gum.n.02', 'name': 'red_gum'}, {'id': 19528, 'synset': 'river_red_gum.n.01', 'name': 'river_red_gum'}, {'id': 19529, 'synset': 'mountain_swamp_gum.n.01', 'name': 'mountain_swamp_gum'}, {'id': 19530, 'synset': 'snow_gum.n.01', 'name': 'snow_gum'}, {'id': 19531, 'synset': 'alpine_ash.n.01', 'name': 'alpine_ash'}, {'id': 19532, 'synset': 'white_mallee.n.01', 'name': 'white_mallee'}, {'id': 19533, 'synset': 'white_stringybark.n.01', 'name': 'white_stringybark'}, {'id': 19534, 'synset': 'white_mountain_ash.n.01', 'name': 'white_mountain_ash'}, {'id': 19535, 'synset': 'blue_gum.n.01', 'name': 'blue_gum'}, {'id': 19536, 'synset': 'rose_gum.n.01', 'name': 'rose_gum'}, {'id': 19537, 'synset': 'cider_gum.n.01', 'name': 'cider_gum'}, {'id': 19538, 'synset': 'swamp_gum.n.01', 'name': 'swamp_gum'}, {'id': 19539, 'synset': 'spotted_gum.n.01', 'name': 'spotted_gum'}, {'id': 19540, 'synset': 'lemon-scented_gum.n.01', 'name': 'lemon-scented_gum'}, {'id': 19541, 'synset': 'black_mallee.n.01', 'name': 'black_mallee'}, {'id': 19542, 'synset': 'forest_red_gum.n.01', 'name': 'forest_red_gum'}, {'id': 19543, 'synset': 'mountain_ash.n.02', 'name': 'mountain_ash'}, {'id': 19544, 'synset': 'manna_gum.n.01', 'name': 'manna_gum'}, {'id': 19545, 'synset': 'clove.n.02', 'name': 'clove'}, {'id': 19546, 'synset': 'clove.n.01', 'name': 'clove'}, {'id': 19547, 'synset': 'tupelo.n.02', 'name': 'tupelo'}, {'id': 19548, 'synset': 'water_gum.n.01', 'name': 'water_gum'}, {'id': 19549, 'synset': 'sour_gum.n.01', 'name': 'sour_gum'}, {'id': 19550, 'synset': "enchanter's_nightshade.n.01", 'name': "enchanter's_nightshade"}, {'id': 19551, 'synset': 'circaea_lutetiana.n.01', 'name': 'Circaea_lutetiana'}, {'id': 19552, 'synset': 'willowherb.n.01', 'name': 'willowherb'}, {'id': 19553, 'synset': 'fireweed.n.01', 'name': 'fireweed'}, {'id': 19554, 'synset': 'california_fuchsia.n.01', 'name': 'California_fuchsia'}, {'id': 19555, 'synset': 'fuchsia.n.01', 'name': 'fuchsia'}, {'id': 19556, 'synset': "lady's-eardrop.n.01", 'name': "lady's-eardrop"}, {'id': 19557, 'synset': 'evening_primrose.n.01', 'name': 'evening_primrose'}, {'id': 19558, 'synset': 'common_evening_primrose.n.01', 'name': 'common_evening_primrose'}, {'id': 19559, 'synset': 'sundrops.n.01', 'name': 'sundrops'}, {'id': 19560, 'synset': 'missouri_primrose.n.01', 'name': 'Missouri_primrose'}, {'id': 19561, 'synset': 'pomegranate.n.01', 'name': 'pomegranate'}, {'id': 19562, 'synset': 'mangrove.n.01', 'name': 'mangrove'}, {'id': 19563, 'synset': 'daphne.n.01', 'name': 'daphne'}, {'id': 19564, 'synset': 'garland_flower.n.01', 'name': 'garland_flower'}, {'id': 19565, 'synset': 'spurge_laurel.n.01', 'name': 'spurge_laurel'}, {'id': 19566, 'synset': 'mezereon.n.01', 'name': 'mezereon'}, {'id': 19567, 'synset': 'indian_rhododendron.n.01', 'name': 'Indian_rhododendron'}, {'id': 19568, 'synset': 'medinilla_magnifica.n.01', 'name': 'Medinilla_magnifica'}, {'id': 19569, 'synset': 'deer_grass.n.01', 'name': 'deer_grass'}, {'id': 19570, 'synset': 'canna.n.01', 'name': 'canna'}, {'id': 19571, 'synset': 'achira.n.01', 'name': 'achira'}, {'id': 19572, 'synset': 'arrowroot.n.02', 'name': 'arrowroot'}, {'id': 19573, 'synset': 'banana.n.01', 'name': 'banana'}, {'id': 19574, 'synset': 'dwarf_banana.n.01', 'name': 'dwarf_banana'}, {'id': 19575, 'synset': 'japanese_banana.n.01', 'name': 'Japanese_banana'}, {'id': 19576, 'synset': 'plantain.n.02', 'name': 'plantain'}, {'id': 19577, 'synset': 'edible_banana.n.01', 'name': 'edible_banana'}, {'id': 19578, 'synset': 'abaca.n.02', 'name': 
'abaca'}, {'id': 19579, 'synset': 'abyssinian_banana.n.01', 'name': 'Abyssinian_banana'}, {'id': 19580, 'synset': 'ginger.n.01', 'name': 'ginger'}, {'id': 19581, 'synset': 'common_ginger.n.01', 'name': 'common_ginger'}, {'id': 19582, 'synset': 'turmeric.n.01', 'name': 'turmeric'}, {'id': 19583, 'synset': 'galangal.n.01', 'name': 'galangal'}, {'id': 19584, 'synset': 'shellflower.n.02', 'name': 'shellflower'}, {'id': 19585, 'synset': 'grains_of_paradise.n.01', 'name': 'grains_of_paradise'}, {'id': 19586, 'synset': 'cardamom.n.01', 'name': 'cardamom'}, {'id': 19587, 'synset': 'begonia.n.01', 'name': 'begonia'}, {'id': 19588, 'synset': 'fibrous-rooted_begonia.n.01', 'name': 'fibrous-rooted_begonia'}, {'id': 19589, 'synset': 'tuberous_begonia.n.01', 'name': 'tuberous_begonia'}, {'id': 19590, 'synset': 'rhizomatous_begonia.n.01', 'name': 'rhizomatous_begonia'}, {'id': 19591, 'synset': 'christmas_begonia.n.01', 'name': 'Christmas_begonia'}, {'id': 19592, 'synset': 'angel-wing_begonia.n.01', 'name': 'angel-wing_begonia'}, {'id': 19593, 'synset': 'beefsteak_begonia.n.01', 'name': 'beefsteak_begonia'}, {'id': 19594, 'synset': 'star_begonia.n.01', 'name': 'star_begonia'}, {'id': 19595, 'synset': 'rex_begonia.n.01', 'name': 'rex_begonia'}, {'id': 19596, 'synset': 'wax_begonia.n.01', 'name': 'wax_begonia'}, {'id': 19597, 'synset': 'socotra_begonia.n.01', 'name': 'Socotra_begonia'}, {'id': 19598, 'synset': 'hybrid_tuberous_begonia.n.01', 'name': 'hybrid_tuberous_begonia'}, {'id': 19599, 'synset': 'dillenia.n.01', 'name': 'dillenia'}, {'id': 19600, 'synset': 'guinea_gold_vine.n.01', 'name': 'guinea_gold_vine'}, {'id': 19601, 'synset': 'poon.n.02', 'name': 'poon'}, {'id': 19602, 'synset': 'calaba.n.01', 'name': 'calaba'}, {'id': 19603, 'synset': 'maria.n.02', 'name': 'Maria'}, {'id': 19604, 'synset': 'laurelwood.n.01', 'name': 'laurelwood'}, {'id': 19605, 'synset': 'alexandrian_laurel.n.01', 'name': 'Alexandrian_laurel'}, {'id': 19606, 'synset': 'clusia.n.01', 'name': 'clusia'}, {'id': 19607, 'synset': 'wild_fig.n.02', 'name': 'wild_fig'}, {'id': 19608, 'synset': 'waxflower.n.02', 'name': 'waxflower'}, {'id': 19609, 'synset': 'pitch_apple.n.01', 'name': 'pitch_apple'}, {'id': 19610, 'synset': 'mangosteen.n.01', 'name': 'mangosteen'}, {'id': 19611, 'synset': 'gamboge_tree.n.01', 'name': 'gamboge_tree'}, {'id': 19612, 'synset': "st_john's_wort.n.01", 'name': "St_John's_wort"}, {'id': 19613, 'synset': "common_st_john's_wort.n.01", 'name': "common_St_John's_wort"}, {'id': 19614, 'synset': "great_st_john's_wort.n.01", 'name': "great_St_John's_wort"}, {'id': 19615, 'synset': "creeping_st_john's_wort.n.01", 'name': "creeping_St_John's_wort"}, {'id': 19616, 'synset': "low_st_andrew's_cross.n.01", 'name': "low_St_Andrew's_cross"}, {'id': 19617, 'synset': 'klammath_weed.n.01', 'name': 'klammath_weed'}, {'id': 19618, 'synset': "shrubby_st_john's_wort.n.01", 'name': "shrubby_St_John's_wort"}, {'id': 19619, 'synset': "st_peter's_wort.n.01", 'name': "St_Peter's_wort"}, {'id': 19620, 'synset': "marsh_st-john's_wort.n.01", 'name': "marsh_St-John's_wort"}, {'id': 19621, 'synset': 'mammee_apple.n.01', 'name': 'mammee_apple'}, {'id': 19622, 'synset': 'rose_chestnut.n.01', 'name': 'rose_chestnut'}, {'id': 19623, 'synset': 'bower_actinidia.n.01', 'name': 'bower_actinidia'}, {'id': 19624, 'synset': 'chinese_gooseberry.n.01', 'name': 'Chinese_gooseberry'}, {'id': 19625, 'synset': 'silvervine.n.01', 'name': 'silvervine'}, {'id': 19626, 'synset': 'wild_cinnamon.n.01', 'name': 'wild_cinnamon'}, {'id': 19627, 'synset': 
'papaya.n.01', 'name': 'papaya'}, {'id': 19628, 'synset': 'souari.n.01', 'name': 'souari'}, {'id': 19629, 'synset': 'rockrose.n.02', 'name': 'rockrose'}, {'id': 19630, 'synset': 'white-leaved_rockrose.n.01', 'name': 'white-leaved_rockrose'}, {'id': 19631, 'synset': 'common_gum_cistus.n.01', 'name': 'common_gum_cistus'}, {'id': 19632, 'synset': 'frostweed.n.01', 'name': 'frostweed'}, {'id': 19633, 'synset': 'dipterocarp.n.01', 'name': 'dipterocarp'}, {'id': 19634, 'synset': 'red_lauan.n.02', 'name': 'red_lauan'}, {'id': 19635, 'synset': "governor's_plum.n.01", 'name': "governor's_plum"}, {'id': 19636, 'synset': 'kei_apple.n.01', 'name': 'kei_apple'}, {'id': 19637, 'synset': 'ketembilla.n.01', 'name': 'ketembilla'}, {'id': 19638, 'synset': 'chaulmoogra.n.01', 'name': 'chaulmoogra'}, {'id': 19639, 'synset': 'wild_peach.n.01', 'name': 'wild_peach'}, {'id': 19640, 'synset': 'candlewood.n.01', 'name': 'candlewood'}, {'id': 19641, 'synset': 'boojum_tree.n.01', 'name': 'boojum_tree'}, {'id': 19642, 'synset': "bird's-eye_bush.n.01", 'name': "bird's-eye_bush"}, {'id': 19643, 'synset': 'granadilla.n.03', 'name': 'granadilla'}, {'id': 19644, 'synset': 'granadilla.n.02', 'name': 'granadilla'}, {'id': 19645, 'synset': 'granadilla.n.01', 'name': 'granadilla'}, {'id': 19646, 'synset': 'maypop.n.01', 'name': 'maypop'}, {'id': 19647, 'synset': 'jamaica_honeysuckle.n.01', 'name': 'Jamaica_honeysuckle'}, {'id': 19648, 'synset': 'banana_passion_fruit.n.01', 'name': 'banana_passion_fruit'}, {'id': 19649, 'synset': 'sweet_calabash.n.01', 'name': 'sweet_calabash'}, {'id': 19650, 'synset': 'love-in-a-mist.n.01', 'name': 'love-in-a-mist'}, {'id': 19651, 'synset': 'reseda.n.01', 'name': 'reseda'}, {'id': 19652, 'synset': 'mignonette.n.01', 'name': 'mignonette'}, {'id': 19653, 'synset': "dyer's_rocket.n.01", 'name': "dyer's_rocket"}, {'id': 19654, 'synset': 'false_tamarisk.n.01', 'name': 'false_tamarisk'}, {'id': 19655, 'synset': 'halophyte.n.01', 'name': 'halophyte'}, {'id': 19656, 'synset': 'viola.n.01', 'name': 'viola'}, {'id': 19657, 'synset': 'violet.n.01', 'name': 'violet'}, {'id': 19658, 'synset': 'field_pansy.n.01', 'name': 'field_pansy'}, {'id': 19659, 'synset': 'american_dog_violet.n.01', 'name': 'American_dog_violet'}, {'id': 19660, 'synset': 'dog_violet.n.01', 'name': 'dog_violet'}, {'id': 19661, 'synset': 'horned_violet.n.01', 'name': 'horned_violet'}, {'id': 19662, 'synset': 'two-eyed_violet.n.01', 'name': 'two-eyed_violet'}, {'id': 19663, 'synset': "bird's-foot_violet.n.01", 'name': "bird's-foot_violet"}, {'id': 19664, 'synset': 'downy_yellow_violet.n.01', 'name': 'downy_yellow_violet'}, {'id': 19665, 'synset': 'long-spurred_violet.n.01', 'name': 'long-spurred_violet'}, {'id': 19666, 'synset': 'pale_violet.n.01', 'name': 'pale_violet'}, {'id': 19667, 'synset': 'hedge_violet.n.01', 'name': 'hedge_violet'}, {'id': 19668, 'synset': 'nettle.n.01', 'name': 'nettle'}, {'id': 19669, 'synset': 'stinging_nettle.n.01', 'name': 'stinging_nettle'}, {'id': 19670, 'synset': 'roman_nettle.n.01', 'name': 'Roman_nettle'}, {'id': 19671, 'synset': 'ramie.n.01', 'name': 'ramie'}, {'id': 19672, 'synset': 'wood_nettle.n.01', 'name': 'wood_nettle'}, {'id': 19673, 'synset': 'australian_nettle.n.01', 'name': 'Australian_nettle'}, {'id': 19674, 'synset': 'pellitory-of-the-wall.n.01', 'name': 'pellitory-of-the-wall'}, {'id': 19675, 'synset': 'richweed.n.02', 'name': 'richweed'}, {'id': 19676, 'synset': 'artillery_plant.n.01', 'name': 'artillery_plant'}, {'id': 19677, 'synset': 'friendship_plant.n.01', 'name': 
'friendship_plant'}, {'id': 19678, 'synset': 'queensland_grass-cloth_plant.n.01', 'name': 'Queensland_grass-cloth_plant'}, {'id': 19679, 'synset': 'pipturus_albidus.n.01', 'name': 'Pipturus_albidus'}, {'id': 19680, 'synset': 'cannabis.n.01', 'name': 'cannabis'}, {'id': 19681, 'synset': 'indian_hemp.n.01', 'name': 'Indian_hemp'}, {'id': 19682, 'synset': 'mulberry.n.01', 'name': 'mulberry'}, {'id': 19683, 'synset': 'white_mulberry.n.01', 'name': 'white_mulberry'}, {'id': 19684, 'synset': 'black_mulberry.n.01', 'name': 'black_mulberry'}, {'id': 19685, 'synset': 'red_mulberry.n.01', 'name': 'red_mulberry'}, {'id': 19686, 'synset': 'osage_orange.n.01', 'name': 'osage_orange'}, {'id': 19687, 'synset': 'breadfruit.n.01', 'name': 'breadfruit'}, {'id': 19688, 'synset': 'jackfruit.n.01', 'name': 'jackfruit'}, {'id': 19689, 'synset': 'marang.n.01', 'name': 'marang'}, {'id': 19690, 'synset': 'fig_tree.n.01', 'name': 'fig_tree'}, {'id': 19691, 'synset': 'fig.n.02', 'name': 'fig'}, {'id': 19692, 'synset': 'caprifig.n.01', 'name': 'caprifig'}, {'id': 19693, 'synset': 'golden_fig.n.01', 'name': 'golden_fig'}, {'id': 19694, 'synset': 'banyan.n.01', 'name': 'banyan'}, {'id': 19695, 'synset': 'pipal.n.01', 'name': 'pipal'}, {'id': 19696, 'synset': 'india-rubber_tree.n.01', 'name': 'India-rubber_tree'}, {'id': 19697, 'synset': 'mistletoe_fig.n.01', 'name': 'mistletoe_fig'}, {'id': 19698, 'synset': 'port_jackson_fig.n.01', 'name': 'Port_Jackson_fig'}, {'id': 19699, 'synset': 'sycamore.n.04', 'name': 'sycamore'}, {'id': 19700, 'synset': 'paper_mulberry.n.01', 'name': 'paper_mulberry'}, {'id': 19701, 'synset': 'trumpetwood.n.01', 'name': 'trumpetwood'}, {'id': 19702, 'synset': 'elm.n.01', 'name': 'elm'}, {'id': 19703, 'synset': 'winged_elm.n.01', 'name': 'winged_elm'}, {'id': 19704, 'synset': 'american_elm.n.01', 'name': 'American_elm'}, {'id': 19705, 'synset': 'smooth-leaved_elm.n.01', 'name': 'smooth-leaved_elm'}, {'id': 19706, 'synset': 'cedar_elm.n.01', 'name': 'cedar_elm'}, {'id': 19707, 'synset': 'witch_elm.n.01', 'name': 'witch_elm'}, {'id': 19708, 'synset': 'dutch_elm.n.01', 'name': 'Dutch_elm'}, {'id': 19709, 'synset': 'huntingdon_elm.n.01', 'name': 'Huntingdon_elm'}, {'id': 19710, 'synset': 'water_elm.n.01', 'name': 'water_elm'}, {'id': 19711, 'synset': 'chinese_elm.n.02', 'name': 'Chinese_elm'}, {'id': 19712, 'synset': 'english_elm.n.01', 'name': 'English_elm'}, {'id': 19713, 'synset': 'siberian_elm.n.01', 'name': 'Siberian_elm'}, {'id': 19714, 'synset': 'slippery_elm.n.01', 'name': 'slippery_elm'}, {'id': 19715, 'synset': 'jersey_elm.n.01', 'name': 'Jersey_elm'}, {'id': 19716, 'synset': 'september_elm.n.01', 'name': 'September_elm'}, {'id': 19717, 'synset': 'rock_elm.n.01', 'name': 'rock_elm'}, {'id': 19718, 'synset': 'hackberry.n.01', 'name': 'hackberry'}, {'id': 19719, 'synset': 'european_hackberry.n.01', 'name': 'European_hackberry'}, {'id': 19720, 'synset': 'american_hackberry.n.01', 'name': 'American_hackberry'}, {'id': 19721, 'synset': 'sugarberry.n.01', 'name': 'sugarberry'}, {'id': 19722, 'synset': 'iridaceous_plant.n.01', 'name': 'iridaceous_plant'}, {'id': 19723, 'synset': 'bearded_iris.n.01', 'name': 'bearded_iris'}, {'id': 19724, 'synset': 'beardless_iris.n.01', 'name': 'beardless_iris'}, {'id': 19725, 'synset': 'orrisroot.n.01', 'name': 'orrisroot'}, {'id': 19726, 'synset': 'dwarf_iris.n.02', 'name': 'dwarf_iris'}, {'id': 19727, 'synset': 'dutch_iris.n.02', 'name': 'Dutch_iris'}, {'id': 19728, 'synset': 'florentine_iris.n.01', 'name': 'Florentine_iris'}, {'id': 19729, 'synset': 
'stinking_iris.n.01', 'name': 'stinking_iris'}, {'id': 19730, 'synset': 'german_iris.n.02', 'name': 'German_iris'}, {'id': 19731, 'synset': 'japanese_iris.n.01', 'name': 'Japanese_iris'}, {'id': 19732, 'synset': 'german_iris.n.01', 'name': 'German_iris'}, {'id': 19733, 'synset': 'dalmatian_iris.n.01', 'name': 'Dalmatian_iris'}, {'id': 19734, 'synset': 'persian_iris.n.01', 'name': 'Persian_iris'}, {'id': 19735, 'synset': 'dutch_iris.n.01', 'name': 'Dutch_iris'}, {'id': 19736, 'synset': 'dwarf_iris.n.01', 'name': 'dwarf_iris'}, {'id': 19737, 'synset': 'spanish_iris.n.01', 'name': 'Spanish_iris'}, {'id': 19738, 'synset': 'blackberry-lily.n.01', 'name': 'blackberry-lily'}, {'id': 19739, 'synset': 'crocus.n.01', 'name': 'crocus'}, {'id': 19740, 'synset': 'saffron.n.01', 'name': 'saffron'}, {'id': 19741, 'synset': 'corn_lily.n.01', 'name': 'corn_lily'}, {'id': 19742, 'synset': 'blue-eyed_grass.n.01', 'name': 'blue-eyed_grass'}, {'id': 19743, 'synset': 'wandflower.n.01', 'name': 'wandflower'}, {'id': 19744, 'synset': 'amaryllis.n.01', 'name': 'amaryllis'}, {'id': 19745, 'synset': 'salsilla.n.02', 'name': 'salsilla'}, {'id': 19746, 'synset': 'salsilla.n.01', 'name': 'salsilla'}, {'id': 19747, 'synset': 'blood_lily.n.01', 'name': 'blood_lily'}, {'id': 19748, 'synset': 'cape_tulip.n.01', 'name': 'Cape_tulip'}, {'id': 19749, 'synset': 'hippeastrum.n.01', 'name': 'hippeastrum'}, {'id': 19750, 'synset': 'narcissus.n.01', 'name': 'narcissus'}, {'id': 19751, 'synset': 'daffodil.n.01', 'name': 'daffodil'}, {'id': 19752, 'synset': 'jonquil.n.01', 'name': 'jonquil'}, {'id': 19753, 'synset': 'jonquil.n.02', 'name': 'jonquil'}, {'id': 19754, 'synset': 'jacobean_lily.n.01', 'name': 'Jacobean_lily'}, {'id': 19755, 'synset': 'liliaceous_plant.n.01', 'name': 'liliaceous_plant'}, {'id': 19756, 'synset': 'mountain_lily.n.01', 'name': 'mountain_lily'}, {'id': 19757, 'synset': 'canada_lily.n.01', 'name': 'Canada_lily'}, {'id': 19758, 'synset': 'tiger_lily.n.02', 'name': 'tiger_lily'}, {'id': 19759, 'synset': 'columbia_tiger_lily.n.01', 'name': 'Columbia_tiger_lily'}, {'id': 19760, 'synset': 'tiger_lily.n.01', 'name': 'tiger_lily'}, {'id': 19761, 'synset': 'easter_lily.n.01', 'name': 'Easter_lily'}, {'id': 19762, 'synset': 'coast_lily.n.01', 'name': 'coast_lily'}, {'id': 19763, 'synset': "turk's-cap.n.02", 'name': "Turk's-cap"}, {'id': 19764, 'synset': 'michigan_lily.n.01', 'name': 'Michigan_lily'}, {'id': 19765, 'synset': 'leopard_lily.n.01', 'name': 'leopard_lily'}, {'id': 19766, 'synset': "turk's-cap.n.01", 'name': "Turk's-cap"}, {'id': 19767, 'synset': 'african_lily.n.01', 'name': 'African_lily'}, {'id': 19768, 'synset': 'colicroot.n.01', 'name': 'colicroot'}, {'id': 19769, 'synset': 'ague_root.n.01', 'name': 'ague_root'}, {'id': 19770, 'synset': 'yellow_colicroot.n.01', 'name': 'yellow_colicroot'}, {'id': 19771, 'synset': 'alliaceous_plant.n.01', 'name': 'alliaceous_plant'}, {'id': 19772, 'synset': "hooker's_onion.n.01", 'name': "Hooker's_onion"}, {'id': 19773, 'synset': 'wild_leek.n.02', 'name': 'wild_leek'}, {'id': 19774, 'synset': 'canada_garlic.n.01', 'name': 'Canada_garlic'}, {'id': 19775, 'synset': 'keeled_garlic.n.01', 'name': 'keeled_garlic'}, {'id': 19776, 'synset': 'shallot.n.02', 'name': 'shallot'}, {'id': 19777, 'synset': 'nodding_onion.n.01', 'name': 'nodding_onion'}, {'id': 19778, 'synset': 'welsh_onion.n.01', 'name': 'Welsh_onion'}, {'id': 19779, 'synset': 'red-skinned_onion.n.01', 'name': 'red-skinned_onion'}, {'id': 19780, 'synset': 'daffodil_garlic.n.01', 'name': 'daffodil_garlic'}, {'id': 
19781, 'synset': 'few-flowered_leek.n.01', 'name': 'few-flowered_leek'}, {'id': 19782, 'synset': 'garlic.n.01', 'name': 'garlic'}, {'id': 19783, 'synset': 'sand_leek.n.01', 'name': 'sand_leek'}, {'id': 19784, 'synset': 'chives.n.01', 'name': 'chives'}, {'id': 19785, 'synset': 'crow_garlic.n.01', 'name': 'crow_garlic'}, {'id': 19786, 'synset': 'wild_garlic.n.01', 'name': 'wild_garlic'}, {'id': 19787, 'synset': 'garlic_chive.n.01', 'name': 'garlic_chive'}, {'id': 19788, 'synset': 'round-headed_leek.n.01', 'name': 'round-headed_leek'}, {'id': 19789, 'synset': 'three-cornered_leek.n.01', 'name': 'three-cornered_leek'}, {'id': 19790, 'synset': 'cape_aloe.n.01', 'name': 'cape_aloe'}, {'id': 19791, 'synset': 'kniphofia.n.01', 'name': 'kniphofia'}, {'id': 19792, 'synset': 'poker_plant.n.01', 'name': 'poker_plant'}, {'id': 19793, 'synset': 'red-hot_poker.n.01', 'name': 'red-hot_poker'}, {'id': 19794, 'synset': 'fly_poison.n.01', 'name': 'fly_poison'}, {'id': 19795, 'synset': 'amber_lily.n.01', 'name': 'amber_lily'}, {'id': 19796, 'synset': 'asparagus.n.01', 'name': 'asparagus'}, {'id': 19797, 'synset': 'asparagus_fern.n.01', 'name': 'asparagus_fern'}, {'id': 19798, 'synset': 'smilax.n.02', 'name': 'smilax'}, {'id': 19799, 'synset': 'asphodel.n.01', 'name': 'asphodel'}, {'id': 19800, 'synset': "jacob's_rod.n.01", 'name': "Jacob's_rod"}, {'id': 19801, 'synset': 'aspidistra.n.01', 'name': 'aspidistra'}, {'id': 19802, 'synset': 'coral_drops.n.01', 'name': 'coral_drops'}, {'id': 19803, 'synset': 'christmas_bells.n.01', 'name': 'Christmas_bells'}, {'id': 19804, 'synset': 'climbing_onion.n.01', 'name': 'climbing_onion'}, {'id': 19805, 'synset': 'mariposa.n.01', 'name': 'mariposa'}, {'id': 19806, 'synset': 'globe_lily.n.01', 'name': 'globe_lily'}, {'id': 19807, 'synset': "cat's-ear.n.01", 'name': "cat's-ear"}, {'id': 19808, 'synset': 'white_globe_lily.n.01', 'name': 'white_globe_lily'}, {'id': 19809, 'synset': 'yellow_globe_lily.n.01', 'name': 'yellow_globe_lily'}, {'id': 19810, 'synset': 'rose_globe_lily.n.01', 'name': 'rose_globe_lily'}, {'id': 19811, 'synset': 'star_tulip.n.01', 'name': 'star_tulip'}, {'id': 19812, 'synset': 'desert_mariposa_tulip.n.01', 'name': 'desert_mariposa_tulip'}, {'id': 19813, 'synset': 'yellow_mariposa_tulip.n.01', 'name': 'yellow_mariposa_tulip'}, {'id': 19814, 'synset': 'sagebrush_mariposa_tulip.n.01', 'name': 'sagebrush_mariposa_tulip'}, {'id': 19815, 'synset': 'sego_lily.n.01', 'name': 'sego_lily'}, {'id': 19816, 'synset': 'camas.n.01', 'name': 'camas'}, {'id': 19817, 'synset': 'common_camas.n.01', 'name': 'common_camas'}, {'id': 19818, 'synset': "leichtlin's_camas.n.01", 'name': "Leichtlin's_camas"}, {'id': 19819, 'synset': 'wild_hyacinth.n.02', 'name': 'wild_hyacinth'}, {'id': 19820, 'synset': 'dogtooth_violet.n.01', 'name': 'dogtooth_violet'}, {'id': 19821, 'synset': 'white_dogtooth_violet.n.01', 'name': 'white_dogtooth_violet'}, {'id': 19822, 'synset': "yellow_adder's_tongue.n.01", 'name': "yellow_adder's_tongue"}, {'id': 19823, 'synset': 'european_dogtooth.n.01', 'name': 'European_dogtooth'}, {'id': 19824, 'synset': 'fawn_lily.n.01', 'name': 'fawn_lily'}, {'id': 19825, 'synset': 'glacier_lily.n.01', 'name': 'glacier_lily'}, {'id': 19826, 'synset': 'avalanche_lily.n.01', 'name': 'avalanche_lily'}, {'id': 19827, 'synset': 'fritillary.n.01', 'name': 'fritillary'}, {'id': 19828, 'synset': 'mission_bells.n.02', 'name': 'mission_bells'}, {'id': 19829, 'synset': 'mission_bells.n.01', 'name': 'mission_bells'}, {'id': 19830, 'synset': 'stink_bell.n.01', 'name': 'stink_bell'}, 
{'id': 19831, 'synset': 'crown_imperial.n.01', 'name': 'crown_imperial'}, {'id': 19832, 'synset': 'white_fritillary.n.01', 'name': 'white_fritillary'}, {'id': 19833, 'synset': "snake's_head_fritillary.n.01", 'name': "snake's_head_fritillary"}, {'id': 19834, 'synset': 'adobe_lily.n.01', 'name': 'adobe_lily'}, {'id': 19835, 'synset': 'scarlet_fritillary.n.01', 'name': 'scarlet_fritillary'}, {'id': 19836, 'synset': 'tulip.n.01', 'name': 'tulip'}, {'id': 19837, 'synset': 'dwarf_tulip.n.01', 'name': 'dwarf_tulip'}, {'id': 19838, 'synset': 'lady_tulip.n.01', 'name': 'lady_tulip'}, {'id': 19839, 'synset': 'tulipa_gesneriana.n.01', 'name': 'Tulipa_gesneriana'}, {'id': 19840, 'synset': 'cottage_tulip.n.01', 'name': 'cottage_tulip'}, {'id': 19841, 'synset': 'darwin_tulip.n.01', 'name': 'Darwin_tulip'}, {'id': 19842, 'synset': 'gloriosa.n.01', 'name': 'gloriosa'}, {'id': 19843, 'synset': 'lemon_lily.n.01', 'name': 'lemon_lily'}, {'id': 19844, 'synset': 'common_hyacinth.n.01', 'name': 'common_hyacinth'}, {'id': 19845, 'synset': 'roman_hyacinth.n.01', 'name': 'Roman_hyacinth'}, {'id': 19846, 'synset': 'summer_hyacinth.n.01', 'name': 'summer_hyacinth'}, {'id': 19847, 'synset': 'star-of-bethlehem.n.01', 'name': 'star-of-Bethlehem'}, {'id': 19848, 'synset': 'bath_asparagus.n.01', 'name': 'bath_asparagus'}, {'id': 19849, 'synset': 'grape_hyacinth.n.01', 'name': 'grape_hyacinth'}, {'id': 19850, 'synset': 'common_grape_hyacinth.n.01', 'name': 'common_grape_hyacinth'}, {'id': 19851, 'synset': 'tassel_hyacinth.n.01', 'name': 'tassel_hyacinth'}, {'id': 19852, 'synset': 'scilla.n.01', 'name': 'scilla'}, {'id': 19853, 'synset': 'spring_squill.n.01', 'name': 'spring_squill'}, {'id': 19854, 'synset': 'false_asphodel.n.01', 'name': 'false_asphodel'}, {'id': 19855, 'synset': 'scotch_asphodel.n.01', 'name': 'Scotch_asphodel'}, {'id': 19856, 'synset': 'sea_squill.n.01', 'name': 'sea_squill'}, {'id': 19857, 'synset': 'squill.n.01', 'name': 'squill'}, {'id': 19858, 'synset': "butcher's_broom.n.01", 'name': "butcher's_broom"}, {'id': 19859, 'synset': 'bog_asphodel.n.01', 'name': 'bog_asphodel'}, {'id': 19860, 'synset': 'european_bog_asphodel.n.01', 'name': 'European_bog_asphodel'}, {'id': 19861, 'synset': 'american_bog_asphodel.n.01', 'name': 'American_bog_asphodel'}, {'id': 19862, 'synset': 'hellebore.n.01', 'name': 'hellebore'}, {'id': 19863, 'synset': 'white_hellebore.n.01', 'name': 'white_hellebore'}, {'id': 19864, 'synset': 'squaw_grass.n.01', 'name': 'squaw_grass'}, {'id': 19865, 'synset': 'death_camas.n.01', 'name': 'death_camas'}, {'id': 19866, 'synset': 'alkali_grass.n.01', 'name': 'alkali_grass'}, {'id': 19867, 'synset': 'white_camas.n.01', 'name': 'white_camas'}, {'id': 19868, 'synset': 'poison_camas.n.01', 'name': 'poison_camas'}, {'id': 19869, 'synset': 'grassy_death_camas.n.01', 'name': 'grassy_death_camas'}, {'id': 19870, 'synset': 'prairie_wake-robin.n.01', 'name': 'prairie_wake-robin'}, {'id': 19871, 'synset': 'dwarf-white_trillium.n.01', 'name': 'dwarf-white_trillium'}, {'id': 19872, 'synset': 'herb_paris.n.01', 'name': 'herb_Paris'}, {'id': 19873, 'synset': 'sarsaparilla.n.01', 'name': 'sarsaparilla'}, {'id': 19874, 'synset': 'bullbrier.n.01', 'name': 'bullbrier'}, {'id': 19875, 'synset': 'rough_bindweed.n.01', 'name': 'rough_bindweed'}, {'id': 19876, 'synset': 'clintonia.n.01', 'name': 'clintonia'}, {'id': 19877, 'synset': 'false_lily_of_the_valley.n.02', 'name': 'false_lily_of_the_valley'}, {'id': 19878, 'synset': 'false_lily_of_the_valley.n.01', 'name': 'false_lily_of_the_valley'}, {'id': 19879, 
'synset': "solomon's-seal.n.01", 'name': "Solomon's-seal"}, {'id': 19880, 'synset': "great_solomon's-seal.n.01", 'name': "great_Solomon's-seal"}, {'id': 19881, 'synset': 'bellwort.n.01', 'name': 'bellwort'}, {'id': 19882, 'synset': 'strawflower.n.01', 'name': 'strawflower'}, {'id': 19883, 'synset': 'pia.n.01', 'name': 'pia'}, {'id': 19884, 'synset': 'agave.n.01', 'name': 'agave'}, {'id': 19885, 'synset': 'american_agave.n.01', 'name': 'American_agave'}, {'id': 19886, 'synset': 'sisal.n.02', 'name': 'sisal'}, {'id': 19887, 'synset': 'maguey.n.02', 'name': 'maguey'}, {'id': 19888, 'synset': 'maguey.n.01', 'name': 'maguey'}, {'id': 19889, 'synset': 'agave_tequilana.n.01', 'name': 'Agave_tequilana'}, {'id': 19890, 'synset': 'cabbage_tree.n.03', 'name': 'cabbage_tree'}, {'id': 19891, 'synset': 'dracaena.n.01', 'name': 'dracaena'}, {'id': 19892, 'synset': 'tuberose.n.01', 'name': 'tuberose'}, {'id': 19893, 'synset': 'sansevieria.n.01', 'name': 'sansevieria'}, {'id': 19894, 'synset': 'african_bowstring_hemp.n.01', 'name': 'African_bowstring_hemp'}, {'id': 19895, 'synset': 'ceylon_bowstring_hemp.n.01', 'name': 'Ceylon_bowstring_hemp'}, {'id': 19896, 'synset': "mother-in-law's_tongue.n.01", 'name': "mother-in-law's_tongue"}, {'id': 19897, 'synset': 'spanish_bayonet.n.02', 'name': 'Spanish_bayonet'}, {'id': 19898, 'synset': 'spanish_bayonet.n.01', 'name': 'Spanish_bayonet'}, {'id': 19899, 'synset': 'joshua_tree.n.01', 'name': 'Joshua_tree'}, {'id': 19900, 'synset': 'soapweed.n.01', 'name': 'soapweed'}, {'id': 19901, 'synset': "adam's_needle.n.01", 'name': "Adam's_needle"}, {'id': 19902, 'synset': 'bear_grass.n.02', 'name': 'bear_grass'}, {'id': 19903, 'synset': 'spanish_dagger.n.01', 'name': 'Spanish_dagger'}, {'id': 19904, 'synset': "our_lord's_candle.n.01", 'name': "Our_Lord's_candle"}, {'id': 19905, 'synset': 'water_shamrock.n.01', 'name': 'water_shamrock'}, {'id': 19906, 'synset': 'butterfly_bush.n.01', 'name': 'butterfly_bush'}, {'id': 19907, 'synset': 'yellow_jasmine.n.01', 'name': 'yellow_jasmine'}, {'id': 19908, 'synset': 'flax.n.02', 'name': 'flax'}, {'id': 19909, 'synset': 'calabar_bean.n.01', 'name': 'calabar_bean'}, {'id': 19910, 'synset': 'bonduc.n.02', 'name': 'bonduc'}, {'id': 19911, 'synset': 'divi-divi.n.02', 'name': 'divi-divi'}, {'id': 19912, 'synset': 'mysore_thorn.n.01', 'name': 'Mysore_thorn'}, {'id': 19913, 'synset': 'brazilian_ironwood.n.01', 'name': 'brazilian_ironwood'}, {'id': 19914, 'synset': 'bird_of_paradise.n.01', 'name': 'bird_of_paradise'}, {'id': 19915, 'synset': 'shingle_tree.n.01', 'name': 'shingle_tree'}, {'id': 19916, 'synset': 'mountain_ebony.n.01', 'name': 'mountain_ebony'}, {'id': 19917, 'synset': 'msasa.n.01', 'name': 'msasa'}, {'id': 19918, 'synset': 'cassia.n.01', 'name': 'cassia'}, {'id': 19919, 'synset': 'golden_shower_tree.n.01', 'name': 'golden_shower_tree'}, {'id': 19920, 'synset': 'pink_shower.n.01', 'name': 'pink_shower'}, {'id': 19921, 'synset': 'rainbow_shower.n.01', 'name': 'rainbow_shower'}, {'id': 19922, 'synset': 'horse_cassia.n.01', 'name': 'horse_cassia'}, {'id': 19923, 'synset': 'carob.n.02', 'name': 'carob'}, {'id': 19924, 'synset': 'carob.n.01', 'name': 'carob'}, {'id': 19925, 'synset': 'paloverde.n.01', 'name': 'paloverde'}, {'id': 19926, 'synset': 'royal_poinciana.n.01', 'name': 'royal_poinciana'}, {'id': 19927, 'synset': 'locust_tree.n.01', 'name': 'locust_tree'}, {'id': 19928, 'synset': 'water_locust.n.01', 'name': 'water_locust'}, {'id': 19929, 'synset': 'honey_locust.n.01', 'name': 'honey_locust'}, {'id': 19930, 'synset': 
'kentucky_coffee_tree.n.01', 'name': 'Kentucky_coffee_tree'}, {'id': 19931, 'synset': 'logwood.n.02', 'name': 'logwood'}, {'id': 19932, 'synset': 'jerusalem_thorn.n.03', 'name': 'Jerusalem_thorn'}, {'id': 19933, 'synset': 'palo_verde.n.01', 'name': 'palo_verde'}, {'id': 19934, 'synset': 'dalmatian_laburnum.n.01', 'name': 'Dalmatian_laburnum'}, {'id': 19935, 'synset': 'senna.n.01', 'name': 'senna'}, {'id': 19936, 'synset': 'avaram.n.01', 'name': 'avaram'}, {'id': 19937, 'synset': 'alexandria_senna.n.01', 'name': 'Alexandria_senna'}, {'id': 19938, 'synset': 'wild_senna.n.01', 'name': 'wild_senna'}, {'id': 19939, 'synset': 'sicklepod.n.01', 'name': 'sicklepod'}, {'id': 19940, 'synset': 'coffee_senna.n.01', 'name': 'coffee_senna'}, {'id': 19941, 'synset': 'tamarind.n.01', 'name': 'tamarind'}, {'id': 19942, 'synset': 'false_indigo.n.03', 'name': 'false_indigo'}, {'id': 19943, 'synset': 'false_indigo.n.02', 'name': 'false_indigo'}, {'id': 19944, 'synset': 'hog_peanut.n.01', 'name': 'hog_peanut'}, {'id': 19945, 'synset': 'angelim.n.01', 'name': 'angelim'}, {'id': 19946, 'synset': 'cabbage_bark.n.01', 'name': 'cabbage_bark'}, {'id': 19947, 'synset': 'kidney_vetch.n.01', 'name': 'kidney_vetch'}, {'id': 19948, 'synset': 'groundnut.n.01', 'name': 'groundnut'}, {'id': 19949, 'synset': 'rooibos.n.01', 'name': 'rooibos'}, {'id': 19950, 'synset': 'milk_vetch.n.01', 'name': 'milk_vetch'}, {'id': 19951, 'synset': 'alpine_milk_vetch.n.01', 'name': 'alpine_milk_vetch'}, {'id': 19952, 'synset': 'purple_milk_vetch.n.01', 'name': 'purple_milk_vetch'}, {'id': 19953, 'synset': 'camwood.n.01', 'name': 'camwood'}, {'id': 19954, 'synset': 'wild_indigo.n.01', 'name': 'wild_indigo'}, {'id': 19955, 'synset': 'blue_false_indigo.n.01', 'name': 'blue_false_indigo'}, {'id': 19956, 'synset': 'white_false_indigo.n.01', 'name': 'white_false_indigo'}, {'id': 19957, 'synset': 'indigo_broom.n.01', 'name': 'indigo_broom'}, {'id': 19958, 'synset': 'dhak.n.01', 'name': 'dhak'}, {'id': 19959, 'synset': 'pigeon_pea.n.01', 'name': 'pigeon_pea'}, {'id': 19960, 'synset': 'sword_bean.n.01', 'name': 'sword_bean'}, {'id': 19961, 'synset': 'pea_tree.n.01', 'name': 'pea_tree'}, {'id': 19962, 'synset': 'siberian_pea_tree.n.01', 'name': 'Siberian_pea_tree'}, {'id': 19963, 'synset': 'chinese_pea_tree.n.01', 'name': 'Chinese_pea_tree'}, {'id': 19964, 'synset': 'moreton_bay_chestnut.n.01', 'name': 'Moreton_Bay_chestnut'}, {'id': 19965, 'synset': 'butterfly_pea.n.03', 'name': 'butterfly_pea'}, {'id': 19966, 'synset': 'judas_tree.n.01', 'name': 'Judas_tree'}, {'id': 19967, 'synset': 'redbud.n.01', 'name': 'redbud'}, {'id': 19968, 'synset': 'western_redbud.n.01', 'name': 'western_redbud'}, {'id': 19969, 'synset': 'tagasaste.n.01', 'name': 'tagasaste'}, {'id': 19970, 'synset': 'weeping_tree_broom.n.01', 'name': 'weeping_tree_broom'}, {'id': 19971, 'synset': 'flame_pea.n.01', 'name': 'flame_pea'}, {'id': 19972, 'synset': 'chickpea.n.02', 'name': 'chickpea'}, {'id': 19973, 'synset': 'kentucky_yellowwood.n.01', 'name': 'Kentucky_yellowwood'}, {'id': 19974, 'synset': 'glory_pea.n.01', 'name': 'glory_pea'}, {'id': 19975, 'synset': 'desert_pea.n.01', 'name': 'desert_pea'}, {'id': 19976, 'synset': "parrot's_beak.n.01", 'name': "parrot's_beak"}, {'id': 19977, 'synset': 'butterfly_pea.n.02', 'name': 'butterfly_pea'}, {'id': 19978, 'synset': 'blue_pea.n.01', 'name': 'blue_pea'}, {'id': 19979, 'synset': 'telegraph_plant.n.01', 'name': 'telegraph_plant'}, {'id': 19980, 'synset': 'bladder_senna.n.01', 'name': 'bladder_senna'}, {'id': 19981, 'synset': 
'axseed.n.01', 'name': 'axseed'}, {'id': 19982, 'synset': 'crotalaria.n.01', 'name': 'crotalaria'}, {'id': 19983, 'synset': 'guar.n.01', 'name': 'guar'}, {'id': 19984, 'synset': 'white_broom.n.01', 'name': 'white_broom'}, {'id': 19985, 'synset': 'common_broom.n.01', 'name': 'common_broom'}, {'id': 19986, 'synset': 'rosewood.n.02', 'name': 'rosewood'}, {'id': 19987, 'synset': 'indian_blackwood.n.01', 'name': 'Indian_blackwood'}, {'id': 19988, 'synset': 'sissoo.n.01', 'name': 'sissoo'}, {'id': 19989, 'synset': 'kingwood.n.02', 'name': 'kingwood'}, {'id': 19990, 'synset': 'brazilian_rosewood.n.01', 'name': 'Brazilian_rosewood'}, {'id': 19991, 'synset': 'cocobolo.n.01', 'name': 'cocobolo'}, {'id': 19992, 'synset': 'blackwood.n.02', 'name': 'blackwood'}, {'id': 19993, 'synset': 'bitter_pea.n.01', 'name': 'bitter_pea'}, {'id': 19994, 'synset': 'derris.n.01', 'name': 'derris'}, {'id': 19995, 'synset': 'derris_root.n.01', 'name': 'derris_root'}, {'id': 19996, 'synset': 'prairie_mimosa.n.01', 'name': 'prairie_mimosa'}, {'id': 19997, 'synset': 'tick_trefoil.n.01', 'name': 'tick_trefoil'}, {'id': 19998, 'synset': 'beggarweed.n.01', 'name': 'beggarweed'}, {'id': 19999, 'synset': 'australian_pea.n.01', 'name': 'Australian_pea'}, {'id': 20000, 'synset': 'coral_tree.n.01', 'name': 'coral_tree'}, {'id': 20001, 'synset': 'kaffir_boom.n.02', 'name': 'kaffir_boom'}, {'id': 20002, 'synset': 'coral_bean_tree.n.01', 'name': 'coral_bean_tree'}, {'id': 20003, 'synset': 'ceibo.n.01', 'name': 'ceibo'}, {'id': 20004, 'synset': 'kaffir_boom.n.01', 'name': 'kaffir_boom'}, {'id': 20005, 'synset': 'indian_coral_tree.n.01', 'name': 'Indian_coral_tree'}, {'id': 20006, 'synset': 'cork_tree.n.02', 'name': 'cork_tree'}, {'id': 20007, 'synset': "goat's_rue.n.02", 'name': "goat's_rue"}, {'id': 20008, 'synset': 'poison_bush.n.01', 'name': 'poison_bush'}, {'id': 20009, 'synset': 'spanish_broom.n.02', 'name': 'Spanish_broom'}, {'id': 20010, 'synset': 'woodwaxen.n.01', 'name': 'woodwaxen'}, {'id': 20011, 'synset': 'chanar.n.01', 'name': 'chanar'}, {'id': 20012, 'synset': 'gliricidia.n.01', 'name': 'gliricidia'}, {'id': 20013, 'synset': 'soy.n.01', 'name': 'soy'}, {'id': 20014, 'synset': 'licorice.n.01', 'name': 'licorice'}, {'id': 20015, 'synset': 'wild_licorice.n.02', 'name': 'wild_licorice'}, {'id': 20016, 'synset': 'licorice_root.n.01', 'name': 'licorice_root'}, {'id': 20017, 'synset': 'western_australia_coral_pea.n.01', 'name': 'Western_Australia_coral_pea'}, {'id': 20018, 'synset': 'sweet_vetch.n.01', 'name': 'sweet_vetch'}, {'id': 20019, 'synset': 'french_honeysuckle.n.02', 'name': 'French_honeysuckle'}, {'id': 20020, 'synset': 'anil.n.02', 'name': 'anil'}, {'id': 20021, 'synset': 'scarlet_runner.n.02', 'name': 'scarlet_runner'}, {'id': 20022, 'synset': 'hyacinth_bean.n.01', 'name': 'hyacinth_bean'}, {'id': 20023, 'synset': 'scotch_laburnum.n.01', 'name': 'Scotch_laburnum'}, {'id': 20024, 'synset': 'vetchling.n.01', 'name': 'vetchling'}, {'id': 20025, 'synset': 'wild_pea.n.01', 'name': 'wild_pea'}, {'id': 20026, 'synset': 'everlasting_pea.n.01', 'name': 'everlasting_pea'}, {'id': 20027, 'synset': 'beach_pea.n.01', 'name': 'beach_pea'}, {'id': 20028, 'synset': 'grass_vetch.n.01', 'name': 'grass_vetch'}, {'id': 20029, 'synset': 'marsh_pea.n.01', 'name': 'marsh_pea'}, {'id': 20030, 'synset': 'common_vetchling.n.01', 'name': 'common_vetchling'}, {'id': 20031, 'synset': 'grass_pea.n.01', 'name': 'grass_pea'}, {'id': 20032, 'synset': 'tangier_pea.n.01', 'name': 'Tangier_pea'}, {'id': 20033, 'synset': 'heath_pea.n.01', 'name': 
'heath_pea'}, {'id': 20034, 'synset': 'bicolor_lespediza.n.01', 'name': 'bicolor_lespediza'}, {'id': 20035, 'synset': 'japanese_clover.n.01', 'name': 'japanese_clover'}, {'id': 20036, 'synset': 'korean_lespedeza.n.01', 'name': 'Korean_lespedeza'}, {'id': 20037, 'synset': 'sericea_lespedeza.n.01', 'name': 'sericea_lespedeza'}, {'id': 20038, 'synset': 'lentil.n.03', 'name': 'lentil'}, {'id': 20039, 'synset': 'lentil.n.02', 'name': 'lentil'}, {'id': 20040, 'synset': "prairie_bird's-foot_trefoil.n.01", 'name': "prairie_bird's-foot_trefoil"}, {'id': 20041, 'synset': "bird's_foot_trefoil.n.02", 'name': "bird's_foot_trefoil"}, {'id': 20042, 'synset': 'winged_pea.n.02', 'name': 'winged_pea'}, {'id': 20043, 'synset': 'lupine.n.01', 'name': 'lupine'}, {'id': 20044, 'synset': 'white_lupine.n.01', 'name': 'white_lupine'}, {'id': 20045, 'synset': 'tree_lupine.n.01', 'name': 'tree_lupine'}, {'id': 20046, 'synset': 'wild_lupine.n.01', 'name': 'wild_lupine'}, {'id': 20047, 'synset': 'bluebonnet.n.01', 'name': 'bluebonnet'}, {'id': 20048, 'synset': 'texas_bluebonnet.n.01', 'name': 'Texas_bluebonnet'}, {'id': 20049, 'synset': 'medic.n.01', 'name': 'medic'}, {'id': 20050, 'synset': 'moon_trefoil.n.01', 'name': 'moon_trefoil'}, {'id': 20051, 'synset': 'sickle_alfalfa.n.01', 'name': 'sickle_alfalfa'}, {'id': 20052, 'synset': 'calvary_clover.n.01', 'name': 'Calvary_clover'}, {'id': 20053, 'synset': 'black_medick.n.01', 'name': 'black_medick'}, {'id': 20054, 'synset': 'alfalfa.n.01', 'name': 'alfalfa'}, {'id': 20055, 'synset': 'millettia.n.01', 'name': 'millettia'}, {'id': 20056, 'synset': 'mucuna.n.01', 'name': 'mucuna'}, {'id': 20057, 'synset': 'cowage.n.02', 'name': 'cowage'}, {'id': 20058, 'synset': 'tolu_tree.n.01', 'name': 'tolu_tree'}, {'id': 20059, 'synset': 'peruvian_balsam.n.01', 'name': 'Peruvian_balsam'}, {'id': 20060, 'synset': 'sainfoin.n.01', 'name': 'sainfoin'}, {'id': 20061, 'synset': 'restharrow.n.02', 'name': 'restharrow'}, {'id': 20062, 'synset': 'bead_tree.n.01', 'name': 'bead_tree'}, {'id': 20063, 'synset': 'jumby_bead.n.01', 'name': 'jumby_bead'}, {'id': 20064, 'synset': 'locoweed.n.01', 'name': 'locoweed'}, {'id': 20065, 'synset': 'purple_locoweed.n.01', 'name': 'purple_locoweed'}, {'id': 20066, 'synset': 'tumbleweed.n.01', 'name': 'tumbleweed'}, {'id': 20067, 'synset': 'yam_bean.n.02', 'name': 'yam_bean'}, {'id': 20068, 'synset': 'shamrock_pea.n.01', 'name': 'shamrock_pea'}, {'id': 20069, 'synset': 'pole_bean.n.01', 'name': 'pole_bean'}, {'id': 20070, 'synset': 'kidney_bean.n.01', 'name': 'kidney_bean'}, {'id': 20071, 'synset': 'haricot.n.01', 'name': 'haricot'}, {'id': 20072, 'synset': 'wax_bean.n.01', 'name': 'wax_bean'}, {'id': 20073, 'synset': 'scarlet_runner.n.01', 'name': 'scarlet_runner'}, {'id': 20074, 'synset': 'lima_bean.n.02', 'name': 'lima_bean'}, {'id': 20075, 'synset': 'sieva_bean.n.01', 'name': 'sieva_bean'}, {'id': 20076, 'synset': 'tepary_bean.n.01', 'name': 'tepary_bean'}, {'id': 20077, 'synset': 'chaparral_pea.n.01', 'name': 'chaparral_pea'}, {'id': 20078, 'synset': 'jamaica_dogwood.n.01', 'name': 'Jamaica_dogwood'}, {'id': 20079, 'synset': 'pea.n.02', 'name': 'pea'}, {'id': 20080, 'synset': 'garden_pea.n.01', 'name': 'garden_pea'}, {'id': 20081, 'synset': 'edible-pod_pea.n.01', 'name': 'edible-pod_pea'}, {'id': 20082, 'synset': 'sugar_snap_pea.n.01', 'name': 'sugar_snap_pea'}, {'id': 20083, 'synset': 'field_pea.n.02', 'name': 'field_pea'}, {'id': 20084, 'synset': 'field_pea.n.01', 'name': 'field_pea'}, {'id': 20085, 'synset': 'common_flat_pea.n.01', 'name': 
'common_flat_pea'}, {'id': 20086, 'synset': 'quira.n.02', 'name': 'quira'}, {'id': 20087, 'synset': 'roble.n.01', 'name': 'roble'}, {'id': 20088, 'synset': 'panama_redwood_tree.n.01', 'name': 'Panama_redwood_tree'}, {'id': 20089, 'synset': 'indian_beech.n.01', 'name': 'Indian_beech'}, {'id': 20090, 'synset': 'winged_bean.n.01', 'name': 'winged_bean'}, {'id': 20091, 'synset': 'breadroot.n.01', 'name': 'breadroot'}, {'id': 20092, 'synset': 'bloodwood_tree.n.01', 'name': 'bloodwood_tree'}, {'id': 20093, 'synset': 'kino.n.02', 'name': 'kino'}, {'id': 20094, 'synset': 'red_sandalwood.n.02', 'name': 'red_sandalwood'}, {'id': 20095, 'synset': 'kudzu.n.01', 'name': 'kudzu'}, {'id': 20096, 'synset': 'bristly_locust.n.01', 'name': 'bristly_locust'}, {'id': 20097, 'synset': 'black_locust.n.02', 'name': 'black_locust'}, {'id': 20098, 'synset': 'clammy_locust.n.01', 'name': 'clammy_locust'}, {'id': 20099, 'synset': 'carib_wood.n.01', 'name': 'carib_wood'}, {'id': 20100, 'synset': 'colorado_river_hemp.n.01', 'name': 'Colorado_River_hemp'}, {'id': 20101, 'synset': 'scarlet_wisteria_tree.n.01', 'name': 'scarlet_wisteria_tree'}, {'id': 20102, 'synset': 'japanese_pagoda_tree.n.01', 'name': 'Japanese_pagoda_tree'}, {'id': 20103, 'synset': 'mescal_bean.n.01', 'name': 'mescal_bean'}, {'id': 20104, 'synset': 'kowhai.n.01', 'name': 'kowhai'}, {'id': 20105, 'synset': 'jade_vine.n.01', 'name': 'jade_vine'}, {'id': 20106, 'synset': 'hoary_pea.n.01', 'name': 'hoary_pea'}, {'id': 20107, 'synset': 'bastard_indigo.n.01', 'name': 'bastard_indigo'}, {'id': 20108, 'synset': 'catgut.n.01', 'name': 'catgut'}, {'id': 20109, 'synset': 'bush_pea.n.01', 'name': 'bush_pea'}, {'id': 20110, 'synset': 'false_lupine.n.01', 'name': 'false_lupine'}, {'id': 20111, 'synset': 'carolina_lupine.n.01', 'name': 'Carolina_lupine'}, {'id': 20112, 'synset': 'tipu.n.01', 'name': 'tipu'}, {'id': 20113, 'synset': "bird's_foot_trefoil.n.01", 'name': "bird's_foot_trefoil"}, {'id': 20114, 'synset': 'fenugreek.n.01', 'name': 'fenugreek'}, {'id': 20115, 'synset': 'gorse.n.01', 'name': 'gorse'}, {'id': 20116, 'synset': 'vetch.n.01', 'name': 'vetch'}, {'id': 20117, 'synset': 'tufted_vetch.n.01', 'name': 'tufted_vetch'}, {'id': 20118, 'synset': 'broad_bean.n.01', 'name': 'broad_bean'}, {'id': 20119, 'synset': 'bitter_betch.n.01', 'name': 'bitter_betch'}, {'id': 20120, 'synset': 'bush_vetch.n.01', 'name': 'bush_vetch'}, {'id': 20121, 'synset': 'moth_bean.n.01', 'name': 'moth_bean'}, {'id': 20122, 'synset': 'snailflower.n.01', 'name': 'snailflower'}, {'id': 20123, 'synset': 'mung.n.01', 'name': 'mung'}, {'id': 20124, 'synset': 'cowpea.n.02', 'name': 'cowpea'}, {'id': 20125, 'synset': 'cowpea.n.01', 'name': 'cowpea'}, {'id': 20126, 'synset': 'asparagus_bean.n.01', 'name': 'asparagus_bean'}, {'id': 20127, 'synset': 'swamp_oak.n.01', 'name': 'swamp_oak'}, {'id': 20128, 'synset': 'keurboom.n.02', 'name': 'keurboom'}, {'id': 20129, 'synset': 'keurboom.n.01', 'name': 'keurboom'}, {'id': 20130, 'synset': 'japanese_wistaria.n.01', 'name': 'Japanese_wistaria'}, {'id': 20131, 'synset': 'chinese_wistaria.n.01', 'name': 'Chinese_wistaria'}, {'id': 20132, 'synset': 'american_wistaria.n.01', 'name': 'American_wistaria'}, {'id': 20133, 'synset': 'silky_wisteria.n.01', 'name': 'silky_wisteria'}, {'id': 20134, 'synset': 'palm.n.03', 'name': 'palm'}, {'id': 20135, 'synset': 'sago_palm.n.01', 'name': 'sago_palm'}, {'id': 20136, 'synset': 'feather_palm.n.01', 'name': 'feather_palm'}, {'id': 20137, 'synset': 'fan_palm.n.01', 'name': 'fan_palm'}, {'id': 20138, 'synset': 
'palmetto.n.01', 'name': 'palmetto'}, {'id': 20139, 'synset': 'coyol.n.01', 'name': 'coyol'}, {'id': 20140, 'synset': 'grugru.n.01', 'name': 'grugru'}, {'id': 20141, 'synset': 'areca.n.01', 'name': 'areca'}, {'id': 20142, 'synset': 'betel_palm.n.01', 'name': 'betel_palm'}, {'id': 20143, 'synset': 'sugar_palm.n.01', 'name': 'sugar_palm'}, {'id': 20144, 'synset': 'piassava_palm.n.01', 'name': 'piassava_palm'}, {'id': 20145, 'synset': 'coquilla_nut.n.01', 'name': 'coquilla_nut'}, {'id': 20146, 'synset': 'palmyra.n.01', 'name': 'palmyra'}, {'id': 20147, 'synset': 'calamus.n.01', 'name': 'calamus'}, {'id': 20148, 'synset': 'rattan.n.01', 'name': 'rattan'}, {'id': 20149, 'synset': 'lawyer_cane.n.01', 'name': 'lawyer_cane'}, {'id': 20150, 'synset': 'fishtail_palm.n.01', 'name': 'fishtail_palm'}, {'id': 20151, 'synset': 'wine_palm.n.01', 'name': 'wine_palm'}, {'id': 20152, 'synset': 'wax_palm.n.03', 'name': 'wax_palm'}, {'id': 20153, 'synset': 'coconut.n.03', 'name': 'coconut'}, {'id': 20154, 'synset': 'carnauba.n.02', 'name': 'carnauba'}, {'id': 20155, 'synset': 'caranday.n.01', 'name': 'caranday'}, {'id': 20156, 'synset': 'corozo.n.01', 'name': 'corozo'}, {'id': 20157, 'synset': 'gebang_palm.n.01', 'name': 'gebang_palm'}, {'id': 20158, 'synset': 'latanier.n.01', 'name': 'latanier'}, {'id': 20159, 'synset': 'talipot.n.01', 'name': 'talipot'}, {'id': 20160, 'synset': 'oil_palm.n.01', 'name': 'oil_palm'}, {'id': 20161, 'synset': 'african_oil_palm.n.01', 'name': 'African_oil_palm'}, {'id': 20162, 'synset': 'american_oil_palm.n.01', 'name': 'American_oil_palm'}, {'id': 20163, 'synset': 'palm_nut.n.01', 'name': 'palm_nut'}, {'id': 20164, 'synset': 'cabbage_palm.n.04', 'name': 'cabbage_palm'}, {'id': 20165, 'synset': 'cabbage_palm.n.03', 'name': 'cabbage_palm'}, {'id': 20166, 'synset': 'true_sago_palm.n.01', 'name': 'true_sago_palm'}, {'id': 20167, 'synset': 'nipa_palm.n.01', 'name': 'nipa_palm'}, {'id': 20168, 'synset': 'babassu.n.01', 'name': 'babassu'}, {'id': 20169, 'synset': 'babassu_nut.n.01', 'name': 'babassu_nut'}, {'id': 20170, 'synset': 'cohune_palm.n.01', 'name': 'cohune_palm'}, {'id': 20171, 'synset': 'cohune_nut.n.01', 'name': 'cohune_nut'}, {'id': 20172, 'synset': 'date_palm.n.01', 'name': 'date_palm'}, {'id': 20173, 'synset': 'ivory_palm.n.01', 'name': 'ivory_palm'}, {'id': 20174, 'synset': 'raffia_palm.n.01', 'name': 'raffia_palm'}, {'id': 20175, 'synset': 'bamboo_palm.n.02', 'name': 'bamboo_palm'}, {'id': 20176, 'synset': 'lady_palm.n.01', 'name': 'lady_palm'}, {'id': 20177, 'synset': 'miniature_fan_palm.n.01', 'name': 'miniature_fan_palm'}, {'id': 20178, 'synset': 'reed_rhapis.n.01', 'name': 'reed_rhapis'}, {'id': 20179, 'synset': 'royal_palm.n.01', 'name': 'royal_palm'}, {'id': 20180, 'synset': 'cabbage_palm.n.02', 'name': 'cabbage_palm'}, {'id': 20181, 'synset': 'cabbage_palmetto.n.01', 'name': 'cabbage_palmetto'}, {'id': 20182, 'synset': 'saw_palmetto.n.01', 'name': 'saw_palmetto'}, {'id': 20183, 'synset': 'thatch_palm.n.01', 'name': 'thatch_palm'}, {'id': 20184, 'synset': 'key_palm.n.01', 'name': 'key_palm'}, {'id': 20185, 'synset': 'english_plantain.n.01', 'name': 'English_plantain'}, {'id': 20186, 'synset': 'broad-leaved_plantain.n.02', 'name': 'broad-leaved_plantain'}, {'id': 20187, 'synset': 'hoary_plantain.n.02', 'name': 'hoary_plantain'}, {'id': 20188, 'synset': 'fleawort.n.01', 'name': 'fleawort'}, {'id': 20189, 'synset': "rugel's_plantain.n.01", 'name': "rugel's_plantain"}, {'id': 20190, 'synset': 'hoary_plantain.n.01', 'name': 'hoary_plantain'}, {'id': 20191, 'synset': 
'buckwheat.n.01', 'name': 'buckwheat'}, {'id': 20192, 'synset': "prince's-feather.n.01", 'name': "prince's-feather"}, {'id': 20193, 'synset': 'eriogonum.n.01', 'name': 'eriogonum'}, {'id': 20194, 'synset': 'umbrella_plant.n.02', 'name': 'umbrella_plant'}, {'id': 20195, 'synset': 'wild_buckwheat.n.01', 'name': 'wild_buckwheat'}, {'id': 20196, 'synset': 'rhubarb.n.02', 'name': 'rhubarb'}, {'id': 20197, 'synset': 'himalayan_rhubarb.n.01', 'name': 'Himalayan_rhubarb'}, {'id': 20198, 'synset': 'pie_plant.n.01', 'name': 'pie_plant'}, {'id': 20199, 'synset': 'chinese_rhubarb.n.01', 'name': 'Chinese_rhubarb'}, {'id': 20200, 'synset': 'sour_dock.n.01', 'name': 'sour_dock'}, {'id': 20201, 'synset': 'sheep_sorrel.n.01', 'name': 'sheep_sorrel'}, {'id': 20202, 'synset': 'bitter_dock.n.01', 'name': 'bitter_dock'}, {'id': 20203, 'synset': 'french_sorrel.n.01', 'name': 'French_sorrel'}, {'id': 20204, 'synset': 'yellow-eyed_grass.n.01', 'name': 'yellow-eyed_grass'}, {'id': 20205, 'synset': 'commelina.n.01', 'name': 'commelina'}, {'id': 20206, 'synset': 'spiderwort.n.01', 'name': 'spiderwort'}, {'id': 20207, 'synset': 'pineapple.n.01', 'name': 'pineapple'}, {'id': 20208, 'synset': 'pipewort.n.01', 'name': 'pipewort'}, {'id': 20209, 'synset': 'water_hyacinth.n.01', 'name': 'water_hyacinth'}, {'id': 20210, 'synset': 'water_star_grass.n.01', 'name': 'water_star_grass'}, {'id': 20211, 'synset': 'naiad.n.01', 'name': 'naiad'}, {'id': 20212, 'synset': 'water_plantain.n.01', 'name': 'water_plantain'}, {'id': 20213, 'synset': 'narrow-leaved_water_plantain.n.01', 'name': 'narrow-leaved_water_plantain'}, {'id': 20214, 'synset': 'hydrilla.n.01', 'name': 'hydrilla'}, {'id': 20215, 'synset': 'american_frogbit.n.01', 'name': 'American_frogbit'}, {'id': 20216, 'synset': 'waterweed.n.01', 'name': 'waterweed'}, {'id': 20217, 'synset': 'canadian_pondweed.n.01', 'name': 'Canadian_pondweed'}, {'id': 20218, 'synset': 'tape_grass.n.01', 'name': 'tape_grass'}, {'id': 20219, 'synset': 'pondweed.n.01', 'name': 'pondweed'}, {'id': 20220, 'synset': 'curled_leaf_pondweed.n.01', 'name': 'curled_leaf_pondweed'}, {'id': 20221, 'synset': 'loddon_pondweed.n.01', 'name': 'loddon_pondweed'}, {'id': 20222, 'synset': "frog's_lettuce.n.01", 'name': "frog's_lettuce"}, {'id': 20223, 'synset': 'arrow_grass.n.01', 'name': 'arrow_grass'}, {'id': 20224, 'synset': 'horned_pondweed.n.01', 'name': 'horned_pondweed'}, {'id': 20225, 'synset': 'eelgrass.n.01', 'name': 'eelgrass'}, {'id': 20226, 'synset': 'rose.n.01', 'name': 'rose'}, {'id': 20227, 'synset': 'hip.n.05', 'name': 'hip'}, {'id': 20228, 'synset': 'banksia_rose.n.01', 'name': 'banksia_rose'}, {'id': 20229, 'synset': 'damask_rose.n.01', 'name': 'damask_rose'}, {'id': 20230, 'synset': 'sweetbrier.n.01', 'name': 'sweetbrier'}, {'id': 20231, 'synset': 'cherokee_rose.n.01', 'name': 'Cherokee_rose'}, {'id': 20232, 'synset': 'musk_rose.n.01', 'name': 'musk_rose'}, {'id': 20233, 'synset': 'agrimonia.n.01', 'name': 'agrimonia'}, {'id': 20234, 'synset': 'harvest-lice.n.01', 'name': 'harvest-lice'}, {'id': 20235, 'synset': 'fragrant_agrimony.n.01', 'name': 'fragrant_agrimony'}, {'id': 20236, 'synset': 'alderleaf_juneberry.n.01', 'name': 'alderleaf_Juneberry'}, {'id': 20237, 'synset': 'flowering_quince.n.01', 'name': 'flowering_quince'}, {'id': 20238, 'synset': 'japonica.n.02', 'name': 'japonica'}, {'id': 20239, 'synset': 'coco_plum.n.01', 'name': 'coco_plum'}, {'id': 20240, 'synset': 'cotoneaster.n.01', 'name': 'cotoneaster'}, {'id': 20241, 'synset': 'cotoneaster_dammeri.n.01', 'name': 
'Cotoneaster_dammeri'}, {'id': 20242, 'synset': 'cotoneaster_horizontalis.n.01', 'name': 'Cotoneaster_horizontalis'}, {'id': 20243, 'synset': 'parsley_haw.n.01', 'name': 'parsley_haw'}, {'id': 20244, 'synset': 'scarlet_haw.n.01', 'name': 'scarlet_haw'}, {'id': 20245, 'synset': 'blackthorn.n.02', 'name': 'blackthorn'}, {'id': 20246, 'synset': 'cockspur_thorn.n.01', 'name': 'cockspur_thorn'}, {'id': 20247, 'synset': 'mayhaw.n.01', 'name': 'mayhaw'}, {'id': 20248, 'synset': 'red_haw.n.02', 'name': 'red_haw'}, {'id': 20249, 'synset': 'red_haw.n.01', 'name': 'red_haw'}, {'id': 20250, 'synset': 'quince.n.01', 'name': 'quince'}, {'id': 20251, 'synset': 'mountain_avens.n.01', 'name': 'mountain_avens'}, {'id': 20252, 'synset': 'loquat.n.01', 'name': 'loquat'}, {'id': 20253, 'synset': 'beach_strawberry.n.01', 'name': 'beach_strawberry'}, {'id': 20254, 'synset': 'virginia_strawberry.n.01', 'name': 'Virginia_strawberry'}, {'id': 20255, 'synset': 'avens.n.01', 'name': 'avens'}, {'id': 20256, 'synset': 'yellow_avens.n.02', 'name': 'yellow_avens'}, {'id': 20257, 'synset': 'yellow_avens.n.01', 'name': 'yellow_avens'}, {'id': 20258, 'synset': 'prairie_smoke.n.01', 'name': 'prairie_smoke'}, {'id': 20259, 'synset': 'bennet.n.01', 'name': 'bennet'}, {'id': 20260, 'synset': 'toyon.n.01', 'name': 'toyon'}, {'id': 20261, 'synset': 'apple_tree.n.01', 'name': 'apple_tree'}, {'id': 20262, 'synset': 'apple.n.02', 'name': 'apple'}, {'id': 20263, 'synset': 'wild_apple.n.01', 'name': 'wild_apple'}, {'id': 20264, 'synset': 'crab_apple.n.01', 'name': 'crab_apple'}, {'id': 20265, 'synset': 'siberian_crab.n.01', 'name': 'Siberian_crab'}, {'id': 20266, 'synset': 'wild_crab.n.01', 'name': 'wild_crab'}, {'id': 20267, 'synset': 'american_crab_apple.n.01', 'name': 'American_crab_apple'}, {'id': 20268, 'synset': 'oregon_crab_apple.n.01', 'name': 'Oregon_crab_apple'}, {'id': 20269, 'synset': 'southern_crab_apple.n.01', 'name': 'Southern_crab_apple'}, {'id': 20270, 'synset': 'iowa_crab.n.01', 'name': 'Iowa_crab'}, {'id': 20271, 'synset': 'bechtel_crab.n.01', 'name': 'Bechtel_crab'}, {'id': 20272, 'synset': 'medlar.n.02', 'name': 'medlar'}, {'id': 20273, 'synset': 'cinquefoil.n.01', 'name': 'cinquefoil'}, {'id': 20274, 'synset': 'silverweed.n.02', 'name': 'silverweed'}, {'id': 20275, 'synset': 'salad_burnet.n.01', 'name': 'salad_burnet'}, {'id': 20276, 'synset': 'plum.n.01', 'name': 'plum'}, {'id': 20277, 'synset': 'wild_plum.n.01', 'name': 'wild_plum'}, {'id': 20278, 'synset': 'allegheny_plum.n.01', 'name': 'Allegheny_plum'}, {'id': 20279, 'synset': 'american_red_plum.n.01', 'name': 'American_red_plum'}, {'id': 20280, 'synset': 'chickasaw_plum.n.01', 'name': 'chickasaw_plum'}, {'id': 20281, 'synset': 'beach_plum.n.01', 'name': 'beach_plum'}, {'id': 20282, 'synset': 'common_plum.n.01', 'name': 'common_plum'}, {'id': 20283, 'synset': 'bullace.n.01', 'name': 'bullace'}, {'id': 20284, 'synset': 'damson_plum.n.02', 'name': 'damson_plum'}, {'id': 20285, 'synset': 'big-tree_plum.n.01', 'name': 'big-tree_plum'}, {'id': 20286, 'synset': 'canada_plum.n.01', 'name': 'Canada_plum'}, {'id': 20287, 'synset': 'plumcot.n.01', 'name': 'plumcot'}, {'id': 20288, 'synset': 'apricot.n.01', 'name': 'apricot'}, {'id': 20289, 'synset': 'japanese_apricot.n.01', 'name': 'Japanese_apricot'}, {'id': 20290, 'synset': 'common_apricot.n.01', 'name': 'common_apricot'}, {'id': 20291, 'synset': 'purple_apricot.n.01', 'name': 'purple_apricot'}, {'id': 20292, 'synset': 'cherry.n.02', 'name': 'cherry'}, {'id': 20293, 'synset': 'wild_cherry.n.02', 'name': 
'wild_cherry'}, {'id': 20294, 'synset': 'wild_cherry.n.01', 'name': 'wild_cherry'}, {'id': 20295, 'synset': 'sweet_cherry.n.01', 'name': 'sweet_cherry'}, {'id': 20296, 'synset': 'heart_cherry.n.01', 'name': 'heart_cherry'}, {'id': 20297, 'synset': 'gean.n.01', 'name': 'gean'}, {'id': 20298, 'synset': 'capulin.n.01', 'name': 'capulin'}, {'id': 20299, 'synset': 'cherry_laurel.n.02', 'name': 'cherry_laurel'}, {'id': 20300, 'synset': 'cherry_plum.n.01', 'name': 'cherry_plum'}, {'id': 20301, 'synset': 'sour_cherry.n.01', 'name': 'sour_cherry'}, {'id': 20302, 'synset': 'amarelle.n.01', 'name': 'amarelle'}, {'id': 20303, 'synset': 'morello.n.01', 'name': 'morello'}, {'id': 20304, 'synset': 'marasca.n.01', 'name': 'marasca'}, {'id': 20305, 'synset': 'almond_tree.n.01', 'name': 'almond_tree'}, {'id': 20306, 'synset': 'almond.n.01', 'name': 'almond'}, {'id': 20307, 'synset': 'bitter_almond.n.01', 'name': 'bitter_almond'}, {'id': 20308, 'synset': 'jordan_almond.n.01', 'name': 'jordan_almond'}, {'id': 20309, 'synset': 'dwarf_flowering_almond.n.01', 'name': 'dwarf_flowering_almond'}, {'id': 20310, 'synset': 'holly-leaved_cherry.n.01', 'name': 'holly-leaved_cherry'}, {'id': 20311, 'synset': 'fuji.n.01', 'name': 'fuji'}, {'id': 20312, 'synset': 'flowering_almond.n.02', 'name': 'flowering_almond'}, {'id': 20313, 'synset': 'cherry_laurel.n.01', 'name': 'cherry_laurel'}, {'id': 20314, 'synset': 'catalina_cherry.n.01', 'name': 'Catalina_cherry'}, {'id': 20315, 'synset': 'bird_cherry.n.01', 'name': 'bird_cherry'}, {'id': 20316, 'synset': 'hagberry_tree.n.01', 'name': 'hagberry_tree'}, {'id': 20317, 'synset': 'hagberry.n.01', 'name': 'hagberry'}, {'id': 20318, 'synset': 'pin_cherry.n.01', 'name': 'pin_cherry'}, {'id': 20319, 'synset': 'peach.n.01', 'name': 'peach'}, {'id': 20320, 'synset': 'nectarine.n.01', 'name': 'nectarine'}, {'id': 20321, 'synset': 'sand_cherry.n.01', 'name': 'sand_cherry'}, {'id': 20322, 'synset': 'japanese_plum.n.01', 'name': 'Japanese_plum'}, {'id': 20323, 'synset': 'black_cherry.n.01', 'name': 'black_cherry'}, {'id': 20324, 'synset': 'flowering_cherry.n.01', 'name': 'flowering_cherry'}, {'id': 20325, 'synset': 'oriental_cherry.n.01', 'name': 'oriental_cherry'}, {'id': 20326, 'synset': 'japanese_flowering_cherry.n.01', 'name': 'Japanese_flowering_cherry'}, {'id': 20327, 'synset': 'sierra_plum.n.01', 'name': 'Sierra_plum'}, {'id': 20328, 'synset': 'rosebud_cherry.n.01', 'name': 'rosebud_cherry'}, {'id': 20329, 'synset': 'russian_almond.n.01', 'name': 'Russian_almond'}, {'id': 20330, 'synset': 'flowering_almond.n.01', 'name': 'flowering_almond'}, {'id': 20331, 'synset': 'chokecherry.n.02', 'name': 'chokecherry'}, {'id': 20332, 'synset': 'chokecherry.n.01', 'name': 'chokecherry'}, {'id': 20333, 'synset': 'western_chokecherry.n.01', 'name': 'western_chokecherry'}, {'id': 20334, 'synset': 'pyracantha.n.01', 'name': 'Pyracantha'}, {'id': 20335, 'synset': 'pear.n.02', 'name': 'pear'}, {'id': 20336, 'synset': 'fruit_tree.n.01', 'name': 'fruit_tree'}, {'id': 20337, 'synset': 'bramble_bush.n.01', 'name': 'bramble_bush'}, {'id': 20338, 'synset': 'lawyerbush.n.01', 'name': 'lawyerbush'}, {'id': 20339, 'synset': 'stone_bramble.n.01', 'name': 'stone_bramble'}, {'id': 20340, 'synset': 'sand_blackberry.n.01', 'name': 'sand_blackberry'}, {'id': 20341, 'synset': 'boysenberry.n.01', 'name': 'boysenberry'}, {'id': 20342, 'synset': 'loganberry.n.01', 'name': 'loganberry'}, {'id': 20343, 'synset': 'american_dewberry.n.02', 'name': 'American_dewberry'}, {'id': 20344, 'synset': 'northern_dewberry.n.01', 
'name': 'Northern_dewberry'}, {'id': 20345, 'synset': 'southern_dewberry.n.01', 'name': 'Southern_dewberry'}, {'id': 20346, 'synset': 'swamp_dewberry.n.01', 'name': 'swamp_dewberry'}, {'id': 20347, 'synset': 'european_dewberry.n.01', 'name': 'European_dewberry'}, {'id': 20348, 'synset': 'raspberry.n.01', 'name': 'raspberry'}, {'id': 20349, 'synset': 'wild_raspberry.n.01', 'name': 'wild_raspberry'}, {'id': 20350, 'synset': 'american_raspberry.n.01', 'name': 'American_raspberry'}, {'id': 20351, 'synset': 'black_raspberry.n.01', 'name': 'black_raspberry'}, {'id': 20352, 'synset': 'salmonberry.n.03', 'name': 'salmonberry'}, {'id': 20353, 'synset': 'salmonberry.n.02', 'name': 'salmonberry'}, {'id': 20354, 'synset': 'wineberry.n.01', 'name': 'wineberry'}, {'id': 20355, 'synset': 'mountain_ash.n.01', 'name': 'mountain_ash'}, {'id': 20356, 'synset': 'rowan.n.01', 'name': 'rowan'}, {'id': 20357, 'synset': 'rowanberry.n.01', 'name': 'rowanberry'}, {'id': 20358, 'synset': 'american_mountain_ash.n.01', 'name': 'American_mountain_ash'}, {'id': 20359, 'synset': 'western_mountain_ash.n.01', 'name': 'Western_mountain_ash'}, {'id': 20360, 'synset': 'service_tree.n.01', 'name': 'service_tree'}, {'id': 20361, 'synset': 'wild_service_tree.n.01', 'name': 'wild_service_tree'}, {'id': 20362, 'synset': 'spirea.n.02', 'name': 'spirea'}, {'id': 20363, 'synset': 'bridal_wreath.n.02', 'name': 'bridal_wreath'}, {'id': 20364, 'synset': 'madderwort.n.01', 'name': 'madderwort'}, {'id': 20365, 'synset': 'indian_madder.n.01', 'name': 'Indian_madder'}, {'id': 20366, 'synset': 'madder.n.01', 'name': 'madder'}, {'id': 20367, 'synset': 'woodruff.n.02', 'name': 'woodruff'}, {'id': 20368, 'synset': 'dagame.n.01', 'name': 'dagame'}, {'id': 20369, 'synset': 'blolly.n.01', 'name': 'blolly'}, {'id': 20370, 'synset': 'coffee.n.02', 'name': 'coffee'}, {'id': 20371, 'synset': 'arabian_coffee.n.01', 'name': 'Arabian_coffee'}, {'id': 20372, 'synset': 'liberian_coffee.n.01', 'name': 'Liberian_coffee'}, {'id': 20373, 'synset': 'robusta_coffee.n.01', 'name': 'robusta_coffee'}, {'id': 20374, 'synset': 'cinchona.n.02', 'name': 'cinchona'}, {'id': 20375, 'synset': 'cartagena_bark.n.01', 'name': 'Cartagena_bark'}, {'id': 20376, 'synset': 'calisaya.n.01', 'name': 'calisaya'}, {'id': 20377, 'synset': 'cinchona_tree.n.01', 'name': 'cinchona_tree'}, {'id': 20378, 'synset': 'cinchona.n.01', 'name': 'cinchona'}, {'id': 20379, 'synset': 'bedstraw.n.01', 'name': 'bedstraw'}, {'id': 20380, 'synset': 'sweet_woodruff.n.01', 'name': 'sweet_woodruff'}, {'id': 20381, 'synset': 'northern_bedstraw.n.01', 'name': 'Northern_bedstraw'}, {'id': 20382, 'synset': 'yellow_bedstraw.n.01', 'name': 'yellow_bedstraw'}, {'id': 20383, 'synset': 'wild_licorice.n.01', 'name': 'wild_licorice'}, {'id': 20384, 'synset': 'cleavers.n.01', 'name': 'cleavers'}, {'id': 20385, 'synset': 'wild_madder.n.01', 'name': 'wild_madder'}, {'id': 20386, 'synset': 'cape_jasmine.n.01', 'name': 'cape_jasmine'}, {'id': 20387, 'synset': 'genipa.n.01', 'name': 'genipa'}, {'id': 20388, 'synset': 'genipap_fruit.n.01', 'name': 'genipap_fruit'}, {'id': 20389, 'synset': 'hamelia.n.01', 'name': 'hamelia'}, {'id': 20390, 'synset': 'scarlet_bush.n.01', 'name': 'scarlet_bush'}, {'id': 20391, 'synset': 'lemonwood.n.02', 'name': 'lemonwood'}, {'id': 20392, 'synset': 'negro_peach.n.01', 'name': 'negro_peach'}, {'id': 20393, 'synset': 'wild_medlar.n.01', 'name': 'wild_medlar'}, {'id': 20394, 'synset': 'spanish_tamarind.n.01', 'name': 'Spanish_tamarind'}, {'id': 20395, 'synset': 'abelia.n.01', 'name': 'abelia'}, 
{'id': 20396, 'synset': 'bush_honeysuckle.n.02', 'name': 'bush_honeysuckle'}, {'id': 20397, 'synset': 'american_twinflower.n.01', 'name': 'American_twinflower'}, {'id': 20398, 'synset': 'honeysuckle.n.01', 'name': 'honeysuckle'}, {'id': 20399, 'synset': 'american_fly_honeysuckle.n.01', 'name': 'American_fly_honeysuckle'}, {'id': 20400, 'synset': 'italian_honeysuckle.n.01', 'name': 'Italian_honeysuckle'}, {'id': 20401, 'synset': 'yellow_honeysuckle.n.01', 'name': 'yellow_honeysuckle'}, {'id': 20402, 'synset': 'hairy_honeysuckle.n.01', 'name': 'hairy_honeysuckle'}, {'id': 20403, 'synset': 'japanese_honeysuckle.n.01', 'name': 'Japanese_honeysuckle'}, {'id': 20404, 'synset': "hall's_honeysuckle.n.01", 'name': "Hall's_honeysuckle"}, {'id': 20405, 'synset': "morrow's_honeysuckle.n.01", 'name': "Morrow's_honeysuckle"}, {'id': 20406, 'synset': 'woodbine.n.02', 'name': 'woodbine'}, {'id': 20407, 'synset': 'trumpet_honeysuckle.n.01', 'name': 'trumpet_honeysuckle'}, {'id': 20408, 'synset': 'european_fly_honeysuckle.n.01', 'name': 'European_fly_honeysuckle'}, {'id': 20409, 'synset': 'swamp_fly_honeysuckle.n.01', 'name': 'swamp_fly_honeysuckle'}, {'id': 20410, 'synset': 'snowberry.n.01', 'name': 'snowberry'}, {'id': 20411, 'synset': 'coralberry.n.01', 'name': 'coralberry'}, {'id': 20412, 'synset': 'blue_elder.n.01', 'name': 'blue_elder'}, {'id': 20413, 'synset': 'dwarf_elder.n.01', 'name': 'dwarf_elder'}, {'id': 20414, 'synset': 'american_red_elder.n.01', 'name': 'American_red_elder'}, {'id': 20415, 'synset': 'european_red_elder.n.01', 'name': 'European_red_elder'}, {'id': 20416, 'synset': 'feverroot.n.01', 'name': 'feverroot'}, {'id': 20417, 'synset': 'cranberry_bush.n.01', 'name': 'cranberry_bush'}, {'id': 20418, 'synset': 'wayfaring_tree.n.01', 'name': 'wayfaring_tree'}, {'id': 20419, 'synset': 'guelder_rose.n.01', 'name': 'guelder_rose'}, {'id': 20420, 'synset': 'arrow_wood.n.01', 'name': 'arrow_wood'}, {'id': 20421, 'synset': 'black_haw.n.02', 'name': 'black_haw'}, {'id': 20422, 'synset': 'weigela.n.01', 'name': 'weigela'}, {'id': 20423, 'synset': 'teasel.n.01', 'name': 'teasel'}, {'id': 20424, 'synset': 'common_teasel.n.01', 'name': 'common_teasel'}, {'id': 20425, 'synset': "fuller's_teasel.n.01", 'name': "fuller's_teasel"}, {'id': 20426, 'synset': 'wild_teasel.n.01', 'name': 'wild_teasel'}, {'id': 20427, 'synset': 'scabious.n.01', 'name': 'scabious'}, {'id': 20428, 'synset': 'sweet_scabious.n.01', 'name': 'sweet_scabious'}, {'id': 20429, 'synset': 'field_scabious.n.01', 'name': 'field_scabious'}, {'id': 20430, 'synset': 'jewelweed.n.01', 'name': 'jewelweed'}, {'id': 20431, 'synset': 'geranium.n.01', 'name': 'geranium'}, {'id': 20432, 'synset': 'cranesbill.n.01', 'name': 'cranesbill'}, {'id': 20433, 'synset': 'wild_geranium.n.01', 'name': 'wild_geranium'}, {'id': 20434, 'synset': 'meadow_cranesbill.n.01', 'name': 'meadow_cranesbill'}, {'id': 20435, 'synset': "richardson's_geranium.n.01", 'name': "Richardson's_geranium"}, {'id': 20436, 'synset': 'herb_robert.n.01', 'name': 'herb_robert'}, {'id': 20437, 'synset': 'sticky_geranium.n.01', 'name': 'sticky_geranium'}, {'id': 20438, 'synset': "dove's_foot_geranium.n.01", 'name': "dove's_foot_geranium"}, {'id': 20439, 'synset': 'rose_geranium.n.01', 'name': 'rose_geranium'}, {'id': 20440, 'synset': 'fish_geranium.n.01', 'name': 'fish_geranium'}, {'id': 20441, 'synset': 'ivy_geranium.n.01', 'name': 'ivy_geranium'}, {'id': 20442, 'synset': 'apple_geranium.n.01', 'name': 'apple_geranium'}, {'id': 20443, 'synset': 'lemon_geranium.n.01', 'name': 
'lemon_geranium'}, {'id': 20444, 'synset': 'storksbill.n.01', 'name': 'storksbill'}, {'id': 20445, 'synset': 'musk_clover.n.01', 'name': 'musk_clover'}, {'id': 20446, 'synset': 'incense_tree.n.01', 'name': 'incense_tree'}, {'id': 20447, 'synset': 'elephant_tree.n.01', 'name': 'elephant_tree'}, {'id': 20448, 'synset': 'gumbo-limbo.n.01', 'name': 'gumbo-limbo'}, {'id': 20449, 'synset': 'boswellia_carteri.n.01', 'name': 'Boswellia_carteri'}, {'id': 20450, 'synset': 'salai.n.01', 'name': 'salai'}, {'id': 20451, 'synset': 'balm_of_gilead.n.03', 'name': 'balm_of_gilead'}, {'id': 20452, 'synset': 'myrrh_tree.n.01', 'name': 'myrrh_tree'}, {'id': 20453, 'synset': 'protium_heptaphyllum.n.01', 'name': 'Protium_heptaphyllum'}, {'id': 20454, 'synset': 'protium_guianense.n.01', 'name': 'Protium_guianense'}, {'id': 20455, 'synset': 'water_starwort.n.01', 'name': 'water_starwort'}, {'id': 20456, 'synset': 'barbados_cherry.n.01', 'name': 'barbados_cherry'}, {'id': 20457, 'synset': 'mahogany.n.02', 'name': 'mahogany'}, {'id': 20458, 'synset': 'chinaberry.n.02', 'name': 'chinaberry'}, {'id': 20459, 'synset': 'neem.n.01', 'name': 'neem'}, {'id': 20460, 'synset': 'neem_seed.n.01', 'name': 'neem_seed'}, {'id': 20461, 'synset': 'spanish_cedar.n.01', 'name': 'Spanish_cedar'}, {'id': 20462, 'synset': 'satinwood.n.03', 'name': 'satinwood'}, {'id': 20463, 'synset': 'african_scented_mahogany.n.01', 'name': 'African_scented_mahogany'}, {'id': 20464, 'synset': 'silver_ash.n.01', 'name': 'silver_ash'}, {'id': 20465, 'synset': 'native_beech.n.01', 'name': 'native_beech'}, {'id': 20466, 'synset': 'bunji-bunji.n.01', 'name': 'bunji-bunji'}, {'id': 20467, 'synset': 'african_mahogany.n.01', 'name': 'African_mahogany'}, {'id': 20468, 'synset': 'lanseh_tree.n.01', 'name': 'lanseh_tree'}, {'id': 20469, 'synset': 'true_mahogany.n.01', 'name': 'true_mahogany'}, {'id': 20470, 'synset': 'honduras_mahogany.n.01', 'name': 'Honduras_mahogany'}, {'id': 20471, 'synset': 'philippine_mahogany.n.02', 'name': 'Philippine_mahogany'}, {'id': 20472, 'synset': 'caracolito.n.01', 'name': 'caracolito'}, {'id': 20473, 'synset': 'common_wood_sorrel.n.01', 'name': 'common_wood_sorrel'}, {'id': 20474, 'synset': 'bermuda_buttercup.n.01', 'name': 'Bermuda_buttercup'}, {'id': 20475, 'synset': 'creeping_oxalis.n.01', 'name': 'creeping_oxalis'}, {'id': 20476, 'synset': 'goatsfoot.n.01', 'name': 'goatsfoot'}, {'id': 20477, 'synset': 'violet_wood_sorrel.n.01', 'name': 'violet_wood_sorrel'}, {'id': 20478, 'synset': 'oca.n.01', 'name': 'oca'}, {'id': 20479, 'synset': 'carambola.n.01', 'name': 'carambola'}, {'id': 20480, 'synset': 'bilimbi.n.01', 'name': 'bilimbi'}, {'id': 20481, 'synset': 'milkwort.n.01', 'name': 'milkwort'}, {'id': 20482, 'synset': 'senega.n.02', 'name': 'senega'}, {'id': 20483, 'synset': 'orange_milkwort.n.01', 'name': 'orange_milkwort'}, {'id': 20484, 'synset': 'flowering_wintergreen.n.01', 'name': 'flowering_wintergreen'}, {'id': 20485, 'synset': 'seneca_snakeroot.n.01', 'name': 'Seneca_snakeroot'}, {'id': 20486, 'synset': 'common_milkwort.n.01', 'name': 'common_milkwort'}, {'id': 20487, 'synset': 'rue.n.01', 'name': 'rue'}, {'id': 20488, 'synset': 'citrus.n.02', 'name': 'citrus'}, {'id': 20489, 'synset': 'orange.n.03', 'name': 'orange'}, {'id': 20490, 'synset': 'sour_orange.n.01', 'name': 'sour_orange'}, {'id': 20491, 'synset': 'bergamot.n.01', 'name': 'bergamot'}, {'id': 20492, 'synset': 'pomelo.n.01', 'name': 'pomelo'}, {'id': 20493, 'synset': 'citron.n.02', 'name': 'citron'}, {'id': 20494, 'synset': 'grapefruit.n.01', 'name': 
'grapefruit'}, {'id': 20495, 'synset': 'mandarin.n.01', 'name': 'mandarin'}, {'id': 20496, 'synset': 'tangerine.n.01', 'name': 'tangerine'}, {'id': 20497, 'synset': 'satsuma.n.01', 'name': 'satsuma'}, {'id': 20498, 'synset': 'sweet_orange.n.02', 'name': 'sweet_orange'}, {'id': 20499, 'synset': 'temple_orange.n.01', 'name': 'temple_orange'}, {'id': 20500, 'synset': 'tangelo.n.01', 'name': 'tangelo'}, {'id': 20501, 'synset': 'rangpur.n.01', 'name': 'rangpur'}, {'id': 20502, 'synset': 'lemon.n.03', 'name': 'lemon'}, {'id': 20503, 'synset': 'sweet_lemon.n.01', 'name': 'sweet_lemon'}, {'id': 20504, 'synset': 'lime.n.04', 'name': 'lime'}, {'id': 20505, 'synset': 'citrange.n.01', 'name': 'citrange'}, {'id': 20506, 'synset': 'fraxinella.n.01', 'name': 'fraxinella'}, {'id': 20507, 'synset': 'kumquat.n.01', 'name': 'kumquat'}, {'id': 20508, 'synset': 'marumi.n.01', 'name': 'marumi'}, {'id': 20509, 'synset': 'nagami.n.01', 'name': 'nagami'}, {'id': 20510, 'synset': 'cork_tree.n.01', 'name': 'cork_tree'}, {'id': 20511, 'synset': 'trifoliate_orange.n.01', 'name': 'trifoliate_orange'}, {'id': 20512, 'synset': 'prickly_ash.n.01', 'name': 'prickly_ash'}, {'id': 20513, 'synset': 'toothache_tree.n.01', 'name': 'toothache_tree'}, {'id': 20514, 'synset': "hercules'-club.n.01", 'name': "Hercules'-club"}, {'id': 20515, 'synset': 'bitterwood_tree.n.01', 'name': 'bitterwood_tree'}, {'id': 20516, 'synset': 'marupa.n.01', 'name': 'marupa'}, {'id': 20517, 'synset': 'paradise_tree.n.01', 'name': 'paradise_tree'}, {'id': 20518, 'synset': 'ailanthus.n.01', 'name': 'ailanthus'}, {'id': 20519, 'synset': 'tree_of_heaven.n.01', 'name': 'tree_of_heaven'}, {'id': 20520, 'synset': 'wild_mango.n.01', 'name': 'wild_mango'}, {'id': 20521, 'synset': 'pepper_tree.n.02', 'name': 'pepper_tree'}, {'id': 20522, 'synset': 'jamaica_quassia.n.02', 'name': 'Jamaica_quassia'}, {'id': 20523, 'synset': 'quassia.n.02', 'name': 'quassia'}, {'id': 20524, 'synset': 'nasturtium.n.01', 'name': 'nasturtium'}, {'id': 20525, 'synset': 'garden_nasturtium.n.01', 'name': 'garden_nasturtium'}, {'id': 20526, 'synset': 'bush_nasturtium.n.01', 'name': 'bush_nasturtium'}, {'id': 20527, 'synset': 'canarybird_flower.n.01', 'name': 'canarybird_flower'}, {'id': 20528, 'synset': 'bean_caper.n.01', 'name': 'bean_caper'}, {'id': 20529, 'synset': 'palo_santo.n.01', 'name': 'palo_santo'}, {'id': 20530, 'synset': 'lignum_vitae.n.02', 'name': 'lignum_vitae'}, {'id': 20531, 'synset': 'creosote_bush.n.01', 'name': 'creosote_bush'}, {'id': 20532, 'synset': 'caltrop.n.01', 'name': 'caltrop'}, {'id': 20533, 'synset': 'willow.n.01', 'name': 'willow'}, {'id': 20534, 'synset': 'osier.n.02', 'name': 'osier'}, {'id': 20535, 'synset': 'white_willow.n.01', 'name': 'white_willow'}, {'id': 20536, 'synset': 'silver_willow.n.01', 'name': 'silver_willow'}, {'id': 20537, 'synset': 'golden_willow.n.01', 'name': 'golden_willow'}, {'id': 20538, 'synset': 'cricket-bat_willow.n.01', 'name': 'cricket-bat_willow'}, {'id': 20539, 'synset': 'arctic_willow.n.01', 'name': 'arctic_willow'}, {'id': 20540, 'synset': 'weeping_willow.n.01', 'name': 'weeping_willow'}, {'id': 20541, 'synset': 'wisconsin_weeping_willow.n.01', 'name': 'Wisconsin_weeping_willow'}, {'id': 20542, 'synset': 'pussy_willow.n.01', 'name': 'pussy_willow'}, {'id': 20543, 'synset': 'sallow.n.01', 'name': 'sallow'}, {'id': 20544, 'synset': 'goat_willow.n.01', 'name': 'goat_willow'}, {'id': 20545, 'synset': 'peachleaf_willow.n.01', 'name': 'peachleaf_willow'}, {'id': 20546, 'synset': 'almond_willow.n.01', 'name': 'almond_willow'}, 
{'id': 20547, 'synset': 'hoary_willow.n.01', 'name': 'hoary_willow'}, {'id': 20548, 'synset': 'crack_willow.n.01', 'name': 'crack_willow'}, {'id': 20549, 'synset': 'prairie_willow.n.01', 'name': 'prairie_willow'}, {'id': 20550, 'synset': 'dwarf_willow.n.01', 'name': 'dwarf_willow'}, {'id': 20551, 'synset': 'grey_willow.n.01', 'name': 'grey_willow'}, {'id': 20552, 'synset': 'arroyo_willow.n.01', 'name': 'arroyo_willow'}, {'id': 20553, 'synset': 'shining_willow.n.01', 'name': 'shining_willow'}, {'id': 20554, 'synset': 'swamp_willow.n.01', 'name': 'swamp_willow'}, {'id': 20555, 'synset': 'bay_willow.n.01', 'name': 'bay_willow'}, {'id': 20556, 'synset': 'purple_willow.n.01', 'name': 'purple_willow'}, {'id': 20557, 'synset': 'balsam_willow.n.01', 'name': 'balsam_willow'}, {'id': 20558, 'synset': 'creeping_willow.n.01', 'name': 'creeping_willow'}, {'id': 20559, 'synset': 'sitka_willow.n.01', 'name': 'Sitka_willow'}, {'id': 20560, 'synset': 'dwarf_grey_willow.n.01', 'name': 'dwarf_grey_willow'}, {'id': 20561, 'synset': 'bearberry_willow.n.01', 'name': 'bearberry_willow'}, {'id': 20562, 'synset': 'common_osier.n.01', 'name': 'common_osier'}, {'id': 20563, 'synset': 'poplar.n.02', 'name': 'poplar'}, {'id': 20564, 'synset': 'balsam_poplar.n.01', 'name': 'balsam_poplar'}, {'id': 20565, 'synset': 'white_poplar.n.01', 'name': 'white_poplar'}, {'id': 20566, 'synset': 'grey_poplar.n.01', 'name': 'grey_poplar'}, {'id': 20567, 'synset': 'black_poplar.n.01', 'name': 'black_poplar'}, {'id': 20568, 'synset': 'lombardy_poplar.n.01', 'name': 'Lombardy_poplar'}, {'id': 20569, 'synset': 'cottonwood.n.01', 'name': 'cottonwood'}, {'id': 20570, 'synset': 'eastern_cottonwood.n.01', 'name': 'Eastern_cottonwood'}, {'id': 20571, 'synset': 'black_cottonwood.n.02', 'name': 'black_cottonwood'}, {'id': 20572, 'synset': 'swamp_cottonwood.n.01', 'name': 'swamp_cottonwood'}, {'id': 20573, 'synset': 'aspen.n.01', 'name': 'aspen'}, {'id': 20574, 'synset': 'quaking_aspen.n.01', 'name': 'quaking_aspen'}, {'id': 20575, 'synset': 'american_quaking_aspen.n.01', 'name': 'American_quaking_aspen'}, {'id': 20576, 'synset': 'canadian_aspen.n.01', 'name': 'Canadian_aspen'}, {'id': 20577, 'synset': 'sandalwood_tree.n.01', 'name': 'sandalwood_tree'}, {'id': 20578, 'synset': 'quandong.n.01', 'name': 'quandong'}, {'id': 20579, 'synset': 'rabbitwood.n.01', 'name': 'rabbitwood'}, {'id': 20580, 'synset': 'loranthaceae.n.01', 'name': 'Loranthaceae'}, {'id': 20581, 'synset': 'mistletoe.n.03', 'name': 'mistletoe'}, {'id': 20582, 'synset': 'american_mistletoe.n.02', 'name': 'American_mistletoe'}, {'id': 20583, 'synset': 'mistletoe.n.02', 'name': 'mistletoe'}, {'id': 20584, 'synset': 'american_mistletoe.n.01', 'name': 'American_mistletoe'}, {'id': 20585, 'synset': 'aalii.n.01', 'name': 'aalii'}, {'id': 20586, 'synset': 'soapberry.n.01', 'name': 'soapberry'}, {'id': 20587, 'synset': 'wild_china_tree.n.01', 'name': 'wild_China_tree'}, {'id': 20588, 'synset': 'china_tree.n.01', 'name': 'China_tree'}, {'id': 20589, 'synset': 'akee.n.01', 'name': 'akee'}, {'id': 20590, 'synset': 'soapberry_vine.n.01', 'name': 'soapberry_vine'}, {'id': 20591, 'synset': 'heartseed.n.01', 'name': 'heartseed'}, {'id': 20592, 'synset': 'balloon_vine.n.01', 'name': 'balloon_vine'}, {'id': 20593, 'synset': 'longan.n.01', 'name': 'longan'}, {'id': 20594, 'synset': 'harpullia.n.01', 'name': 'harpullia'}, {'id': 20595, 'synset': 'harpulla.n.01', 'name': 'harpulla'}, {'id': 20596, 'synset': 'moreton_bay_tulipwood.n.01', 'name': 'Moreton_Bay_tulipwood'}, {'id': 20597, 'synset': 
'litchi.n.01', 'name': 'litchi'}, {'id': 20598, 'synset': 'spanish_lime.n.01', 'name': 'Spanish_lime'}, {'id': 20599, 'synset': 'rambutan.n.01', 'name': 'rambutan'}, {'id': 20600, 'synset': 'pulasan.n.01', 'name': 'pulasan'}, {'id': 20601, 'synset': 'pachysandra.n.01', 'name': 'pachysandra'}, {'id': 20602, 'synset': 'allegheny_spurge.n.01', 'name': 'Allegheny_spurge'}, {'id': 20603, 'synset': 'bittersweet.n.02', 'name': 'bittersweet'}, {'id': 20604, 'synset': 'spindle_tree.n.01', 'name': 'spindle_tree'}, {'id': 20605, 'synset': 'winged_spindle_tree.n.01', 'name': 'winged_spindle_tree'}, {'id': 20606, 'synset': 'wahoo.n.02', 'name': 'wahoo'}, {'id': 20607, 'synset': 'strawberry_bush.n.01', 'name': 'strawberry_bush'}, {'id': 20608, 'synset': 'evergreen_bittersweet.n.01', 'name': 'evergreen_bittersweet'}, {'id': 20609, 'synset': 'cyrilla.n.01', 'name': 'cyrilla'}, {'id': 20610, 'synset': 'titi.n.01', 'name': 'titi'}, {'id': 20611, 'synset': 'crowberry.n.01', 'name': 'crowberry'}, {'id': 20612, 'synset': 'maple.n.02', 'name': 'maple'}, {'id': 20613, 'synset': 'silver_maple.n.01', 'name': 'silver_maple'}, {'id': 20614, 'synset': 'sugar_maple.n.01', 'name': 'sugar_maple'}, {'id': 20615, 'synset': 'red_maple.n.01', 'name': 'red_maple'}, {'id': 20616, 'synset': 'moosewood.n.01', 'name': 'moosewood'}, {'id': 20617, 'synset': 'oregon_maple.n.01', 'name': 'Oregon_maple'}, {'id': 20618, 'synset': 'dwarf_maple.n.01', 'name': 'dwarf_maple'}, {'id': 20619, 'synset': 'mountain_maple.n.01', 'name': 'mountain_maple'}, {'id': 20620, 'synset': 'vine_maple.n.01', 'name': 'vine_maple'}, {'id': 20621, 'synset': 'hedge_maple.n.01', 'name': 'hedge_maple'}, {'id': 20622, 'synset': 'norway_maple.n.01', 'name': 'Norway_maple'}, {'id': 20623, 'synset': 'sycamore.n.03', 'name': 'sycamore'}, {'id': 20624, 'synset': 'box_elder.n.01', 'name': 'box_elder'}, {'id': 20625, 'synset': 'california_box_elder.n.01', 'name': 'California_box_elder'}, {'id': 20626, 'synset': 'pointed-leaf_maple.n.01', 'name': 'pointed-leaf_maple'}, {'id': 20627, 'synset': 'japanese_maple.n.02', 'name': 'Japanese_maple'}, {'id': 20628, 'synset': 'japanese_maple.n.01', 'name': 'Japanese_maple'}, {'id': 20629, 'synset': 'holly.n.01', 'name': 'holly'}, {'id': 20630, 'synset': 'chinese_holly.n.01', 'name': 'Chinese_holly'}, {'id': 20631, 'synset': 'bearberry.n.02', 'name': 'bearberry'}, {'id': 20632, 'synset': 'inkberry.n.01', 'name': 'inkberry'}, {'id': 20633, 'synset': 'mate.n.07', 'name': 'mate'}, {'id': 20634, 'synset': 'american_holly.n.01', 'name': 'American_holly'}, {'id': 20635, 'synset': 'low_gallberry_holly.n.01', 'name': 'low_gallberry_holly'}, {'id': 20636, 'synset': 'tall_gallberry_holly.n.01', 'name': 'tall_gallberry_holly'}, {'id': 20637, 'synset': 'yaupon_holly.n.01', 'name': 'yaupon_holly'}, {'id': 20638, 'synset': 'deciduous_holly.n.01', 'name': 'deciduous_holly'}, {'id': 20639, 'synset': 'juneberry_holly.n.01', 'name': 'juneberry_holly'}, {'id': 20640, 'synset': 'largeleaf_holly.n.01', 'name': 'largeleaf_holly'}, {'id': 20641, 'synset': 'geogia_holly.n.01', 'name': 'Geogia_holly'}, {'id': 20642, 'synset': 'common_winterberry_holly.n.01', 'name': 'common_winterberry_holly'}, {'id': 20643, 'synset': 'smooth_winterberry_holly.n.01', 'name': 'smooth_winterberry_holly'}, {'id': 20644, 'synset': 'cashew.n.01', 'name': 'cashew'}, {'id': 20645, 'synset': 'goncalo_alves.n.01', 'name': 'goncalo_alves'}, {'id': 20646, 'synset': 'venetian_sumac.n.01', 'name': 'Venetian_sumac'}, {'id': 20647, 'synset': 'laurel_sumac.n.01', 'name': 'laurel_sumac'}, 
{'id': 20648, 'synset': 'mango.n.01', 'name': 'mango'}, {'id': 20649, 'synset': 'pistachio.n.01', 'name': 'pistachio'}, {'id': 20650, 'synset': 'terebinth.n.01', 'name': 'terebinth'}, {'id': 20651, 'synset': 'mastic.n.03', 'name': 'mastic'}, {'id': 20652, 'synset': 'australian_sumac.n.01', 'name': 'Australian_sumac'}, {'id': 20653, 'synset': 'sumac.n.02', 'name': 'sumac'}, {'id': 20654, 'synset': 'smooth_sumac.n.01', 'name': 'smooth_sumac'}, {'id': 20655, 'synset': 'sugar-bush.n.01', 'name': 'sugar-bush'}, {'id': 20656, 'synset': 'staghorn_sumac.n.01', 'name': 'staghorn_sumac'}, {'id': 20657, 'synset': 'squawbush.n.01', 'name': 'squawbush'}, {'id': 20658, 'synset': 'aroeira_blanca.n.01', 'name': 'aroeira_blanca'}, {'id': 20659, 'synset': 'pepper_tree.n.01', 'name': 'pepper_tree'}, {'id': 20660, 'synset': 'brazilian_pepper_tree.n.01', 'name': 'Brazilian_pepper_tree'}, {'id': 20661, 'synset': 'hog_plum.n.01', 'name': 'hog_plum'}, {'id': 20662, 'synset': 'mombin.n.01', 'name': 'mombin'}, {'id': 20663, 'synset': 'poison_ash.n.01', 'name': 'poison_ash'}, {'id': 20664, 'synset': 'poison_ivy.n.02', 'name': 'poison_ivy'}, {'id': 20665, 'synset': 'western_poison_oak.n.01', 'name': 'western_poison_oak'}, {'id': 20666, 'synset': 'eastern_poison_oak.n.01', 'name': 'eastern_poison_oak'}, {'id': 20667, 'synset': 'varnish_tree.n.02', 'name': 'varnish_tree'}, {'id': 20668, 'synset': 'horse_chestnut.n.01', 'name': 'horse_chestnut'}, {'id': 20669, 'synset': 'buckeye.n.01', 'name': 'buckeye'}, {'id': 20670, 'synset': 'sweet_buckeye.n.01', 'name': 'sweet_buckeye'}, {'id': 20671, 'synset': 'ohio_buckeye.n.01', 'name': 'Ohio_buckeye'}, {'id': 20672, 'synset': 'dwarf_buckeye.n.01', 'name': 'dwarf_buckeye'}, {'id': 20673, 'synset': 'red_buckeye.n.01', 'name': 'red_buckeye'}, {'id': 20674, 'synset': 'particolored_buckeye.n.01', 'name': 'particolored_buckeye'}, {'id': 20675, 'synset': 'ebony.n.03', 'name': 'ebony'}, {'id': 20676, 'synset': 'marblewood.n.02', 'name': 'marblewood'}, {'id': 20677, 'synset': 'marblewood.n.01', 'name': 'marblewood'}, {'id': 20678, 'synset': 'persimmon.n.01', 'name': 'persimmon'}, {'id': 20679, 'synset': 'japanese_persimmon.n.01', 'name': 'Japanese_persimmon'}, {'id': 20680, 'synset': 'american_persimmon.n.01', 'name': 'American_persimmon'}, {'id': 20681, 'synset': 'date_plum.n.01', 'name': 'date_plum'}, {'id': 20682, 'synset': 'buckthorn.n.02', 'name': 'buckthorn'}, {'id': 20683, 'synset': 'southern_buckthorn.n.01', 'name': 'southern_buckthorn'}, {'id': 20684, 'synset': 'false_buckthorn.n.01', 'name': 'false_buckthorn'}, {'id': 20685, 'synset': 'star_apple.n.01', 'name': 'star_apple'}, {'id': 20686, 'synset': 'satinleaf.n.01', 'name': 'satinleaf'}, {'id': 20687, 'synset': 'balata.n.02', 'name': 'balata'}, {'id': 20688, 'synset': 'sapodilla.n.01', 'name': 'sapodilla'}, {'id': 20689, 'synset': 'gutta-percha_tree.n.02', 'name': 'gutta-percha_tree'}, {'id': 20690, 'synset': 'gutta-percha_tree.n.01', 'name': 'gutta-percha_tree'}, {'id': 20691, 'synset': 'canistel.n.01', 'name': 'canistel'}, {'id': 20692, 'synset': 'marmalade_tree.n.01', 'name': 'marmalade_tree'}, {'id': 20693, 'synset': 'sweetleaf.n.01', 'name': 'sweetleaf'}, {'id': 20694, 'synset': 'asiatic_sweetleaf.n.01', 'name': 'Asiatic_sweetleaf'}, {'id': 20695, 'synset': 'styrax.n.01', 'name': 'styrax'}, {'id': 20696, 'synset': 'snowbell.n.01', 'name': 'snowbell'}, {'id': 20697, 'synset': 'japanese_snowbell.n.01', 'name': 'Japanese_snowbell'}, {'id': 20698, 'synset': 'texas_snowbell.n.01', 'name': 'Texas_snowbell'}, {'id': 20699, 
'synset': 'silver-bell_tree.n.01', 'name': 'silver-bell_tree'}, {'id': 20700, 'synset': 'carnivorous_plant.n.01', 'name': 'carnivorous_plant'}, {'id': 20701, 'synset': 'pitcher_plant.n.01', 'name': 'pitcher_plant'}, {'id': 20702, 'synset': 'common_pitcher_plant.n.01', 'name': 'common_pitcher_plant'}, {'id': 20703, 'synset': 'hooded_pitcher_plant.n.01', 'name': 'hooded_pitcher_plant'}, {'id': 20704, 'synset': "huntsman's_horn.n.01", 'name': "huntsman's_horn"}, {'id': 20705, 'synset': 'tropical_pitcher_plant.n.01', 'name': 'tropical_pitcher_plant'}, {'id': 20706, 'synset': 'sundew.n.01', 'name': 'sundew'}, {'id': 20707, 'synset': "venus's_flytrap.n.01", 'name': "Venus's_flytrap"}, {'id': 20708, 'synset': 'waterwheel_plant.n.01', 'name': 'waterwheel_plant'}, {'id': 20709, 'synset': 'drosophyllum_lusitanicum.n.01', 'name': 'Drosophyllum_lusitanicum'}, {'id': 20710, 'synset': 'roridula.n.01', 'name': 'roridula'}, {'id': 20711, 'synset': 'australian_pitcher_plant.n.01', 'name': 'Australian_pitcher_plant'}, {'id': 20712, 'synset': 'sedum.n.01', 'name': 'sedum'}, {'id': 20713, 'synset': 'stonecrop.n.01', 'name': 'stonecrop'}, {'id': 20714, 'synset': 'rose-root.n.01', 'name': 'rose-root'}, {'id': 20715, 'synset': 'orpine.n.01', 'name': 'orpine'}, {'id': 20716, 'synset': 'pinwheel.n.01', 'name': 'pinwheel'}, {'id': 20717, 'synset': 'christmas_bush.n.01', 'name': 'Christmas_bush'}, {'id': 20718, 'synset': 'hortensia.n.01', 'name': 'hortensia'}, {'id': 20719, 'synset': 'fall-blooming_hydrangea.n.01', 'name': 'fall-blooming_hydrangea'}, {'id': 20720, 'synset': 'carpenteria.n.01', 'name': 'carpenteria'}, {'id': 20721, 'synset': 'decumary.n.01', 'name': 'decumary'}, {'id': 20722, 'synset': 'deutzia.n.01', 'name': 'deutzia'}, {'id': 20723, 'synset': 'philadelphus.n.01', 'name': 'philadelphus'}, {'id': 20724, 'synset': 'mock_orange.n.01', 'name': 'mock_orange'}, {'id': 20725, 'synset': 'saxifrage.n.01', 'name': 'saxifrage'}, {'id': 20726, 'synset': 'yellow_mountain_saxifrage.n.01', 'name': 'yellow_mountain_saxifrage'}, {'id': 20727, 'synset': 'meadow_saxifrage.n.01', 'name': 'meadow_saxifrage'}, {'id': 20728, 'synset': 'mossy_saxifrage.n.01', 'name': 'mossy_saxifrage'}, {'id': 20729, 'synset': 'western_saxifrage.n.01', 'name': 'western_saxifrage'}, {'id': 20730, 'synset': 'purple_saxifrage.n.01', 'name': 'purple_saxifrage'}, {'id': 20731, 'synset': 'star_saxifrage.n.01', 'name': 'star_saxifrage'}, {'id': 20732, 'synset': 'strawberry_geranium.n.01', 'name': 'strawberry_geranium'}, {'id': 20733, 'synset': 'astilbe.n.01', 'name': 'astilbe'}, {'id': 20734, 'synset': 'false_goatsbeard.n.01', 'name': 'false_goatsbeard'}, {'id': 20735, 'synset': 'dwarf_astilbe.n.01', 'name': 'dwarf_astilbe'}, {'id': 20736, 'synset': 'spirea.n.01', 'name': 'spirea'}, {'id': 20737, 'synset': 'bergenia.n.01', 'name': 'bergenia'}, {'id': 20738, 'synset': 'coast_boykinia.n.01', 'name': 'coast_boykinia'}, {'id': 20739, 'synset': 'golden_saxifrage.n.01', 'name': 'golden_saxifrage'}, {'id': 20740, 'synset': 'umbrella_plant.n.01', 'name': 'umbrella_plant'}, {'id': 20741, 'synset': 'bridal_wreath.n.01', 'name': 'bridal_wreath'}, {'id': 20742, 'synset': 'alumroot.n.01', 'name': 'alumroot'}, {'id': 20743, 'synset': 'coralbells.n.01', 'name': 'coralbells'}, {'id': 20744, 'synset': 'leatherleaf_saxifrage.n.01', 'name': 'leatherleaf_saxifrage'}, {'id': 20745, 'synset': 'woodland_star.n.01', 'name': 'woodland_star'}, {'id': 20746, 'synset': 'prairie_star.n.01', 'name': 'prairie_star'}, {'id': 20747, 'synset': 'miterwort.n.01', 'name': 
'miterwort'}, {'id': 20748, 'synset': "five-point_bishop's_cap.n.01", 'name': "five-point_bishop's_cap"}, {'id': 20749, 'synset': 'parnassia.n.01', 'name': 'parnassia'}, {'id': 20750, 'synset': 'bog_star.n.01', 'name': 'bog_star'}, {'id': 20751, 'synset': 'fringed_grass_of_parnassus.n.01', 'name': 'fringed_grass_of_Parnassus'}, {'id': 20752, 'synset': 'false_alumroot.n.01', 'name': 'false_alumroot'}, {'id': 20753, 'synset': 'foamflower.n.01', 'name': 'foamflower'}, {'id': 20754, 'synset': 'false_miterwort.n.01', 'name': 'false_miterwort'}, {'id': 20755, 'synset': 'pickaback_plant.n.01', 'name': 'pickaback_plant'}, {'id': 20756, 'synset': 'currant.n.02', 'name': 'currant'}, {'id': 20757, 'synset': 'black_currant.n.01', 'name': 'black_currant'}, {'id': 20758, 'synset': 'white_currant.n.01', 'name': 'white_currant'}, {'id': 20759, 'synset': 'gooseberry.n.01', 'name': 'gooseberry'}, {'id': 20760, 'synset': 'plane_tree.n.01', 'name': 'plane_tree'}, {'id': 20761, 'synset': 'london_plane.n.01', 'name': 'London_plane'}, {'id': 20762, 'synset': 'american_sycamore.n.01', 'name': 'American_sycamore'}, {'id': 20763, 'synset': 'oriental_plane.n.01', 'name': 'oriental_plane'}, {'id': 20764, 'synset': 'california_sycamore.n.01', 'name': 'California_sycamore'}, {'id': 20765, 'synset': 'arizona_sycamore.n.01', 'name': 'Arizona_sycamore'}, {'id': 20766, 'synset': 'greek_valerian.n.01', 'name': 'Greek_valerian'}, {'id': 20767, 'synset': "northern_jacob's_ladder.n.01", 'name': "northern_Jacob's_ladder"}, {'id': 20768, 'synset': 'skunkweed.n.01', 'name': 'skunkweed'}, {'id': 20769, 'synset': 'phlox.n.01', 'name': 'phlox'}, {'id': 20770, 'synset': 'moss_pink.n.02', 'name': 'moss_pink'}, {'id': 20771, 'synset': 'evening-snow.n.01', 'name': 'evening-snow'}, {'id': 20772, 'synset': 'acanthus.n.01', 'name': 'acanthus'}, {'id': 20773, 'synset': "bear's_breech.n.01", 'name': "bear's_breech"}, {'id': 20774, 'synset': 'caricature_plant.n.01', 'name': 'caricature_plant'}, {'id': 20775, 'synset': 'black-eyed_susan.n.01', 'name': 'black-eyed_Susan'}, {'id': 20776, 'synset': 'catalpa.n.01', 'name': 'catalpa'}, {'id': 20777, 'synset': 'catalpa_bignioides.n.01', 'name': 'Catalpa_bignioides'}, {'id': 20778, 'synset': 'catalpa_speciosa.n.01', 'name': 'Catalpa_speciosa'}, {'id': 20779, 'synset': 'desert_willow.n.01', 'name': 'desert_willow'}, {'id': 20780, 'synset': 'calabash.n.02', 'name': 'calabash'}, {'id': 20781, 'synset': 'calabash.n.01', 'name': 'calabash'}, {'id': 20782, 'synset': 'borage.n.01', 'name': 'borage'}, {'id': 20783, 'synset': 'common_amsinckia.n.01', 'name': 'common_amsinckia'}, {'id': 20784, 'synset': 'anchusa.n.01', 'name': 'anchusa'}, {'id': 20785, 'synset': 'bugloss.n.01', 'name': 'bugloss'}, {'id': 20786, 'synset': 'cape_forget-me-not.n.02', 'name': 'cape_forget-me-not'}, {'id': 20787, 'synset': 'cape_forget-me-not.n.01', 'name': 'cape_forget-me-not'}, {'id': 20788, 'synset': 'spanish_elm.n.02', 'name': 'Spanish_elm'}, {'id': 20789, 'synset': 'princewood.n.01', 'name': 'princewood'}, {'id': 20790, 'synset': 'chinese_forget-me-not.n.01', 'name': 'Chinese_forget-me-not'}, {'id': 20791, 'synset': "hound's-tongue.n.02", 'name': "hound's-tongue"}, {'id': 20792, 'synset': "hound's-tongue.n.01", 'name': "hound's-tongue"}, {'id': 20793, 'synset': 'blueweed.n.01', 'name': 'blueweed'}, {'id': 20794, 'synset': "beggar's_lice.n.01", 'name': "beggar's_lice"}, {'id': 20795, 'synset': 'gromwell.n.01', 'name': 'gromwell'}, {'id': 20796, 'synset': 'puccoon.n.01', 'name': 'puccoon'}, {'id': 20797, 'synset': 
'virginia_bluebell.n.01', 'name': 'Virginia_bluebell'}, {'id': 20798, 'synset': 'garden_forget-me-not.n.01', 'name': 'garden_forget-me-not'}, {'id': 20799, 'synset': 'forget-me-not.n.01', 'name': 'forget-me-not'}, {'id': 20800, 'synset': 'false_gromwell.n.01', 'name': 'false_gromwell'}, {'id': 20801, 'synset': 'comfrey.n.01', 'name': 'comfrey'}, {'id': 20802, 'synset': 'common_comfrey.n.01', 'name': 'common_comfrey'}, {'id': 20803, 'synset': 'convolvulus.n.01', 'name': 'convolvulus'}, {'id': 20804, 'synset': 'bindweed.n.01', 'name': 'bindweed'}, {'id': 20805, 'synset': 'field_bindweed.n.01', 'name': 'field_bindweed'}, {'id': 20806, 'synset': 'scammony.n.03', 'name': 'scammony'}, {'id': 20807, 'synset': 'silverweed.n.01', 'name': 'silverweed'}, {'id': 20808, 'synset': 'dodder.n.01', 'name': 'dodder'}, {'id': 20809, 'synset': 'dichondra.n.01', 'name': 'dichondra'}, {'id': 20810, 'synset': 'cypress_vine.n.01', 'name': 'cypress_vine'}, {'id': 20811, 'synset': 'moonflower.n.01', 'name': 'moonflower'}, {'id': 20812, 'synset': 'wild_potato_vine.n.01', 'name': 'wild_potato_vine'}, {'id': 20813, 'synset': 'red_morning-glory.n.01', 'name': 'red_morning-glory'}, {'id': 20814, 'synset': 'man-of-the-earth.n.01', 'name': 'man-of-the-earth'}, {'id': 20815, 'synset': 'scammony.n.01', 'name': 'scammony'}, {'id': 20816, 'synset': 'japanese_morning_glory.n.01', 'name': 'Japanese_morning_glory'}, {'id': 20817, 'synset': 'imperial_japanese_morning_glory.n.01', 'name': 'imperial_Japanese_morning_glory'}, {'id': 20818, 'synset': 'gesneriad.n.01', 'name': 'gesneriad'}, {'id': 20819, 'synset': 'gesneria.n.01', 'name': 'gesneria'}, {'id': 20820, 'synset': 'achimenes.n.01', 'name': 'achimenes'}, {'id': 20821, 'synset': 'aeschynanthus.n.01', 'name': 'aeschynanthus'}, {'id': 20822, 'synset': 'lace-flower_vine.n.01', 'name': 'lace-flower_vine'}, {'id': 20823, 'synset': 'columnea.n.01', 'name': 'columnea'}, {'id': 20824, 'synset': 'episcia.n.01', 'name': 'episcia'}, {'id': 20825, 'synset': 'gloxinia.n.01', 'name': 'gloxinia'}, {'id': 20826, 'synset': 'canterbury_bell.n.01', 'name': 'Canterbury_bell'}, {'id': 20827, 'synset': 'kohleria.n.01', 'name': 'kohleria'}, {'id': 20828, 'synset': 'african_violet.n.01', 'name': 'African_violet'}, {'id': 20829, 'synset': 'streptocarpus.n.01', 'name': 'streptocarpus'}, {'id': 20830, 'synset': 'cape_primrose.n.01', 'name': 'Cape_primrose'}, {'id': 20831, 'synset': 'waterleaf.n.01', 'name': 'waterleaf'}, {'id': 20832, 'synset': 'virginia_waterleaf.n.01', 'name': 'Virginia_waterleaf'}, {'id': 20833, 'synset': 'yellow_bells.n.01', 'name': 'yellow_bells'}, {'id': 20834, 'synset': 'yerba_santa.n.01', 'name': 'yerba_santa'}, {'id': 20835, 'synset': 'nemophila.n.01', 'name': 'nemophila'}, {'id': 20836, 'synset': 'baby_blue-eyes.n.01', 'name': 'baby_blue-eyes'}, {'id': 20837, 'synset': 'five-spot.n.02', 'name': 'five-spot'}, {'id': 20838, 'synset': 'scorpionweed.n.01', 'name': 'scorpionweed'}, {'id': 20839, 'synset': 'california_bluebell.n.02', 'name': 'California_bluebell'}, {'id': 20840, 'synset': 'california_bluebell.n.01', 'name': 'California_bluebell'}, {'id': 20841, 'synset': 'fiddleneck.n.01', 'name': 'fiddleneck'}, {'id': 20842, 'synset': 'fiesta_flower.n.01', 'name': 'fiesta_flower'}, {'id': 20843, 'synset': 'basil_thyme.n.01', 'name': 'basil_thyme'}, {'id': 20844, 'synset': 'giant_hyssop.n.01', 'name': 'giant_hyssop'}, {'id': 20845, 'synset': 'yellow_giant_hyssop.n.01', 'name': 'yellow_giant_hyssop'}, {'id': 20846, 'synset': 'anise_hyssop.n.01', 'name': 'anise_hyssop'}, {'id': 
20847, 'synset': 'mexican_hyssop.n.01', 'name': 'Mexican_hyssop'}, {'id': 20848, 'synset': 'bugle.n.02', 'name': 'bugle'}, {'id': 20849, 'synset': 'creeping_bugle.n.01', 'name': 'creeping_bugle'}, {'id': 20850, 'synset': 'erect_bugle.n.01', 'name': 'erect_bugle'}, {'id': 20851, 'synset': 'pyramid_bugle.n.01', 'name': 'pyramid_bugle'}, {'id': 20852, 'synset': 'wood_mint.n.01', 'name': 'wood_mint'}, {'id': 20853, 'synset': 'hairy_wood_mint.n.01', 'name': 'hairy_wood_mint'}, {'id': 20854, 'synset': 'downy_wood_mint.n.01', 'name': 'downy_wood_mint'}, {'id': 20855, 'synset': 'calamint.n.01', 'name': 'calamint'}, {'id': 20856, 'synset': 'common_calamint.n.01', 'name': 'common_calamint'}, {'id': 20857, 'synset': 'large-flowered_calamint.n.01', 'name': 'large-flowered_calamint'}, {'id': 20858, 'synset': 'lesser_calamint.n.01', 'name': 'lesser_calamint'}, {'id': 20859, 'synset': 'wild_basil.n.01', 'name': 'wild_basil'}, {'id': 20860, 'synset': 'horse_balm.n.01', 'name': 'horse_balm'}, {'id': 20861, 'synset': 'coleus.n.01', 'name': 'coleus'}, {'id': 20862, 'synset': 'country_borage.n.01', 'name': 'country_borage'}, {'id': 20863, 'synset': 'painted_nettle.n.01', 'name': 'painted_nettle'}, {'id': 20864, 'synset': 'apalachicola_rosemary.n.01', 'name': 'Apalachicola_rosemary'}, {'id': 20865, 'synset': 'dragonhead.n.01', 'name': 'dragonhead'}, {'id': 20866, 'synset': 'elsholtzia.n.01', 'name': 'elsholtzia'}, {'id': 20867, 'synset': 'hemp_nettle.n.01', 'name': 'hemp_nettle'}, {'id': 20868, 'synset': 'ground_ivy.n.01', 'name': 'ground_ivy'}, {'id': 20869, 'synset': 'pennyroyal.n.02', 'name': 'pennyroyal'}, {'id': 20870, 'synset': 'hyssop.n.01', 'name': 'hyssop'}, {'id': 20871, 'synset': 'dead_nettle.n.02', 'name': 'dead_nettle'}, {'id': 20872, 'synset': 'white_dead_nettle.n.01', 'name': 'white_dead_nettle'}, {'id': 20873, 'synset': 'henbit.n.01', 'name': 'henbit'}, {'id': 20874, 'synset': 'english_lavender.n.01', 'name': 'English_lavender'}, {'id': 20875, 'synset': 'french_lavender.n.02', 'name': 'French_lavender'}, {'id': 20876, 'synset': 'spike_lavender.n.01', 'name': 'spike_lavender'}, {'id': 20877, 'synset': 'dagga.n.01', 'name': 'dagga'}, {'id': 20878, 'synset': "lion's-ear.n.01", 'name': "lion's-ear"}, {'id': 20879, 'synset': 'motherwort.n.01', 'name': 'motherwort'}, {'id': 20880, 'synset': 'pitcher_sage.n.02', 'name': 'pitcher_sage'}, {'id': 20881, 'synset': 'bugleweed.n.01', 'name': 'bugleweed'}, {'id': 20882, 'synset': 'water_horehound.n.01', 'name': 'water_horehound'}, {'id': 20883, 'synset': 'gipsywort.n.01', 'name': 'gipsywort'}, {'id': 20884, 'synset': 'origanum.n.01', 'name': 'origanum'}, {'id': 20885, 'synset': 'oregano.n.01', 'name': 'oregano'}, {'id': 20886, 'synset': 'sweet_marjoram.n.01', 'name': 'sweet_marjoram'}, {'id': 20887, 'synset': 'horehound.n.01', 'name': 'horehound'}, {'id': 20888, 'synset': 'common_horehound.n.01', 'name': 'common_horehound'}, {'id': 20889, 'synset': 'lemon_balm.n.01', 'name': 'lemon_balm'}, {'id': 20890, 'synset': 'corn_mint.n.01', 'name': 'corn_mint'}, {'id': 20891, 'synset': 'water-mint.n.01', 'name': 'water-mint'}, {'id': 20892, 'synset': 'bergamot_mint.n.02', 'name': 'bergamot_mint'}, {'id': 20893, 'synset': 'horsemint.n.03', 'name': 'horsemint'}, {'id': 20894, 'synset': 'peppermint.n.01', 'name': 'peppermint'}, {'id': 20895, 'synset': 'spearmint.n.01', 'name': 'spearmint'}, {'id': 20896, 'synset': 'apple_mint.n.01', 'name': 'apple_mint'}, {'id': 20897, 'synset': 'pennyroyal.n.01', 'name': 'pennyroyal'}, {'id': 20898, 'synset': 'yerba_buena.n.01', 'name': 
'yerba_buena'}, {'id': 20899, 'synset': 'molucca_balm.n.01', 'name': 'molucca_balm'}, {'id': 20900, 'synset': 'monarda.n.01', 'name': 'monarda'}, {'id': 20901, 'synset': 'bee_balm.n.02', 'name': 'bee_balm'}, {'id': 20902, 'synset': 'horsemint.n.02', 'name': 'horsemint'}, {'id': 20903, 'synset': 'bee_balm.n.01', 'name': 'bee_balm'}, {'id': 20904, 'synset': 'lemon_mint.n.01', 'name': 'lemon_mint'}, {'id': 20905, 'synset': 'plains_lemon_monarda.n.01', 'name': 'plains_lemon_monarda'}, {'id': 20906, 'synset': 'basil_balm.n.01', 'name': 'basil_balm'}, {'id': 20907, 'synset': 'mustang_mint.n.01', 'name': 'mustang_mint'}, {'id': 20908, 'synset': 'catmint.n.01', 'name': 'catmint'}, {'id': 20909, 'synset': 'basil.n.01', 'name': 'basil'}, {'id': 20910, 'synset': 'beefsteak_plant.n.01', 'name': 'beefsteak_plant'}, {'id': 20911, 'synset': 'phlomis.n.01', 'name': 'phlomis'}, {'id': 20912, 'synset': 'jerusalem_sage.n.01', 'name': 'Jerusalem_sage'}, {'id': 20913, 'synset': 'physostegia.n.01', 'name': 'physostegia'}, {'id': 20914, 'synset': 'plectranthus.n.01', 'name': 'plectranthus'}, {'id': 20915, 'synset': 'patchouli.n.01', 'name': 'patchouli'}, {'id': 20916, 'synset': 'self-heal.n.01', 'name': 'self-heal'}, {'id': 20917, 'synset': 'mountain_mint.n.01', 'name': 'mountain_mint'}, {'id': 20918, 'synset': 'rosemary.n.01', 'name': 'rosemary'}, {'id': 20919, 'synset': 'clary_sage.n.01', 'name': 'clary_sage'}, {'id': 20920, 'synset': 'purple_sage.n.01', 'name': 'purple_sage'}, {'id': 20921, 'synset': 'cancerweed.n.01', 'name': 'cancerweed'}, {'id': 20922, 'synset': 'common_sage.n.01', 'name': 'common_sage'}, {'id': 20923, 'synset': 'meadow_clary.n.01', 'name': 'meadow_clary'}, {'id': 20924, 'synset': 'clary.n.01', 'name': 'clary'}, {'id': 20925, 'synset': 'pitcher_sage.n.01', 'name': 'pitcher_sage'}, {'id': 20926, 'synset': 'mexican_mint.n.01', 'name': 'Mexican_mint'}, {'id': 20927, 'synset': 'wild_sage.n.01', 'name': 'wild_sage'}, {'id': 20928, 'synset': 'savory.n.01', 'name': 'savory'}, {'id': 20929, 'synset': 'summer_savory.n.01', 'name': 'summer_savory'}, {'id': 20930, 'synset': 'winter_savory.n.01', 'name': 'winter_savory'}, {'id': 20931, 'synset': 'skullcap.n.02', 'name': 'skullcap'}, {'id': 20932, 'synset': 'blue_pimpernel.n.01', 'name': 'blue_pimpernel'}, {'id': 20933, 'synset': 'hedge_nettle.n.02', 'name': 'hedge_nettle'}, {'id': 20934, 'synset': 'hedge_nettle.n.01', 'name': 'hedge_nettle'}, {'id': 20935, 'synset': 'germander.n.01', 'name': 'germander'}, {'id': 20936, 'synset': 'american_germander.n.01', 'name': 'American_germander'}, {'id': 20937, 'synset': 'cat_thyme.n.01', 'name': 'cat_thyme'}, {'id': 20938, 'synset': 'wood_sage.n.01', 'name': 'wood_sage'}, {'id': 20939, 'synset': 'thyme.n.01', 'name': 'thyme'}, {'id': 20940, 'synset': 'common_thyme.n.01', 'name': 'common_thyme'}, {'id': 20941, 'synset': 'wild_thyme.n.01', 'name': 'wild_thyme'}, {'id': 20942, 'synset': 'blue_curls.n.01', 'name': 'blue_curls'}, {'id': 20943, 'synset': 'turpentine_camphor_weed.n.01', 'name': 'turpentine_camphor_weed'}, {'id': 20944, 'synset': 'bastard_pennyroyal.n.01', 'name': 'bastard_pennyroyal'}, {'id': 20945, 'synset': 'bladderwort.n.01', 'name': 'bladderwort'}, {'id': 20946, 'synset': 'butterwort.n.01', 'name': 'butterwort'}, {'id': 20947, 'synset': 'genlisea.n.01', 'name': 'genlisea'}, {'id': 20948, 'synset': 'martynia.n.01', 'name': 'martynia'}, {'id': 20949, 'synset': 'common_unicorn_plant.n.01', 'name': 'common_unicorn_plant'}, {'id': 20950, 'synset': "sand_devil's_claw.n.01", 'name': "sand_devil's_claw"}, 
{'id': 20951, 'synset': 'sweet_unicorn_plant.n.01', 'name': 'sweet_unicorn_plant'}, {'id': 20952, 'synset': 'figwort.n.01', 'name': 'figwort'}, {'id': 20953, 'synset': 'snapdragon.n.01', 'name': 'snapdragon'}, {'id': 20954, 'synset': 'white_snapdragon.n.01', 'name': 'white_snapdragon'}, {'id': 20955, 'synset': 'yellow_twining_snapdragon.n.01', 'name': 'yellow_twining_snapdragon'}, {'id': 20956, 'synset': 'mediterranean_snapdragon.n.01', 'name': 'Mediterranean_snapdragon'}, {'id': 20957, 'synset': 'kitten-tails.n.01', 'name': 'kitten-tails'}, {'id': 20958, 'synset': 'alpine_besseya.n.01', 'name': 'Alpine_besseya'}, {'id': 20959, 'synset': 'false_foxglove.n.02', 'name': 'false_foxglove'}, {'id': 20960, 'synset': 'false_foxglove.n.01', 'name': 'false_foxglove'}, {'id': 20961, 'synset': 'calceolaria.n.01', 'name': 'calceolaria'}, {'id': 20962, 'synset': 'indian_paintbrush.n.02', 'name': 'Indian_paintbrush'}, {'id': 20963, 'synset': 'desert_paintbrush.n.01', 'name': 'desert_paintbrush'}, {'id': 20964, 'synset': 'giant_red_paintbrush.n.01', 'name': 'giant_red_paintbrush'}, {'id': 20965, 'synset': 'great_plains_paintbrush.n.01', 'name': 'great_plains_paintbrush'}, {'id': 20966, 'synset': 'sulfur_paintbrush.n.01', 'name': 'sulfur_paintbrush'}, {'id': 20967, 'synset': 'shellflower.n.01', 'name': 'shellflower'}, {'id': 20968, 'synset': 'maiden_blue-eyed_mary.n.01', 'name': 'maiden_blue-eyed_Mary'}, {'id': 20969, 'synset': 'blue-eyed_mary.n.01', 'name': 'blue-eyed_Mary'}, {'id': 20970, 'synset': 'foxglove.n.01', 'name': 'foxglove'}, {'id': 20971, 'synset': 'common_foxglove.n.01', 'name': 'common_foxglove'}, {'id': 20972, 'synset': 'yellow_foxglove.n.01', 'name': 'yellow_foxglove'}, {'id': 20973, 'synset': 'gerardia.n.01', 'name': 'gerardia'}, {'id': 20974, 'synset': 'blue_toadflax.n.01', 'name': 'blue_toadflax'}, {'id': 20975, 'synset': 'toadflax.n.01', 'name': 'toadflax'}, {'id': 20976, 'synset': 'golden-beard_penstemon.n.01', 'name': 'golden-beard_penstemon'}, {'id': 20977, 'synset': 'scarlet_bugler.n.01', 'name': 'scarlet_bugler'}, {'id': 20978, 'synset': 'red_shrubby_penstemon.n.01', 'name': 'red_shrubby_penstemon'}, {'id': 20979, 'synset': 'platte_river_penstemon.n.01', 'name': 'Platte_River_penstemon'}, {'id': 20980, 'synset': 'hot-rock_penstemon.n.01', 'name': 'hot-rock_penstemon'}, {'id': 20981, 'synset': "jones'_penstemon.n.01", 'name': "Jones'_penstemon"}, {'id': 20982, 'synset': 'shrubby_penstemon.n.01', 'name': 'shrubby_penstemon'}, {'id': 20983, 'synset': 'narrow-leaf_penstemon.n.01', 'name': 'narrow-leaf_penstemon'}, {'id': 20984, 'synset': 'balloon_flower.n.01', 'name': 'balloon_flower'}, {'id': 20985, 'synset': "parry's_penstemon.n.01", 'name': "Parry's_penstemon"}, {'id': 20986, 'synset': 'rock_penstemon.n.01', 'name': 'rock_penstemon'}, {'id': 20987, 'synset': "rydberg's_penstemon.n.01", 'name': "Rydberg's_penstemon"}, {'id': 20988, 'synset': 'cascade_penstemon.n.01', 'name': 'cascade_penstemon'}, {'id': 20989, 'synset': "whipple's_penstemon.n.01", 'name': "Whipple's_penstemon"}, {'id': 20990, 'synset': 'moth_mullein.n.01', 'name': 'moth_mullein'}, {'id': 20991, 'synset': 'white_mullein.n.01', 'name': 'white_mullein'}, {'id': 20992, 'synset': 'purple_mullein.n.01', 'name': 'purple_mullein'}, {'id': 20993, 'synset': 'common_mullein.n.01', 'name': 'common_mullein'}, {'id': 20994, 'synset': 'veronica.n.01', 'name': 'veronica'}, {'id': 20995, 'synset': 'field_speedwell.n.01', 'name': 'field_speedwell'}, {'id': 20996, 'synset': 'brooklime.n.02', 'name': 'brooklime'}, {'id': 20997, 
'synset': 'corn_speedwell.n.01', 'name': 'corn_speedwell'}, {'id': 20998, 'synset': 'brooklime.n.01', 'name': 'brooklime'}, {'id': 20999, 'synset': 'germander_speedwell.n.01', 'name': 'germander_speedwell'}, {'id': 21000, 'synset': 'water_speedwell.n.01', 'name': 'water_speedwell'}, {'id': 21001, 'synset': 'common_speedwell.n.01', 'name': 'common_speedwell'}, {'id': 21002, 'synset': 'purslane_speedwell.n.01', 'name': 'purslane_speedwell'}, {'id': 21003, 'synset': 'thyme-leaved_speedwell.n.01', 'name': 'thyme-leaved_speedwell'}, {'id': 21004, 'synset': 'nightshade.n.01', 'name': 'nightshade'}, {'id': 21005, 'synset': 'horse_nettle.n.01', 'name': 'horse_nettle'}, {'id': 21006, 'synset': 'african_holly.n.01', 'name': 'African_holly'}, {'id': 21007, 'synset': 'potato_vine.n.02', 'name': 'potato_vine'}, {'id': 21008, 'synset': 'garden_huckleberry.n.01', 'name': 'garden_huckleberry'}, {'id': 21009, 'synset': 'naranjilla.n.01', 'name': 'naranjilla'}, {'id': 21010, 'synset': 'potato_vine.n.01', 'name': 'potato_vine'}, {'id': 21011, 'synset': 'potato_tree.n.01', 'name': 'potato_tree'}, {'id': 21012, 'synset': 'belladonna.n.01', 'name': 'belladonna'}, {'id': 21013, 'synset': 'bush_violet.n.01', 'name': 'bush_violet'}, {'id': 21014, 'synset': 'lady-of-the-night.n.01', 'name': 'lady-of-the-night'}, {'id': 21015, 'synset': "angel's_trumpet.n.02", 'name': "angel's_trumpet"}, {'id': 21016, 'synset': "angel's_trumpet.n.01", 'name': "angel's_trumpet"}, {'id': 21017, 'synset': "red_angel's_trumpet.n.01", 'name': "red_angel's_trumpet"}, {'id': 21018, 'synset': 'cone_pepper.n.01', 'name': 'cone_pepper'}, {'id': 21019, 'synset': 'bird_pepper.n.01', 'name': 'bird_pepper'}, {'id': 21020, 'synset': 'day_jessamine.n.01', 'name': 'day_jessamine'}, {'id': 21021, 'synset': 'night_jasmine.n.01', 'name': 'night_jasmine'}, {'id': 21022, 'synset': 'tree_tomato.n.01', 'name': 'tree_tomato'}, {'id': 21023, 'synset': 'thorn_apple.n.01', 'name': 'thorn_apple'}, {'id': 21024, 'synset': 'jimsonweed.n.01', 'name': 'jimsonweed'}, {'id': 21025, 'synset': 'pichi.n.01', 'name': 'pichi'}, {'id': 21026, 'synset': 'henbane.n.01', 'name': 'henbane'}, {'id': 21027, 'synset': 'egyptian_henbane.n.01', 'name': 'Egyptian_henbane'}, {'id': 21028, 'synset': 'matrimony_vine.n.01', 'name': 'matrimony_vine'}, {'id': 21029, 'synset': 'common_matrimony_vine.n.01', 'name': 'common_matrimony_vine'}, {'id': 21030, 'synset': 'christmasberry.n.01', 'name': 'Christmasberry'}, {'id': 21031, 'synset': 'plum_tomato.n.01', 'name': 'plum_tomato'}, {'id': 21032, 'synset': 'mandrake.n.02', 'name': 'mandrake'}, {'id': 21033, 'synset': 'mandrake_root.n.01', 'name': 'mandrake_root'}, {'id': 21034, 'synset': 'apple_of_peru.n.01', 'name': 'apple_of_Peru'}, {'id': 21035, 'synset': 'flowering_tobacco.n.01', 'name': 'flowering_tobacco'}, {'id': 21036, 'synset': 'common_tobacco.n.01', 'name': 'common_tobacco'}, {'id': 21037, 'synset': 'wild_tobacco.n.01', 'name': 'wild_tobacco'}, {'id': 21038, 'synset': 'cupflower.n.02', 'name': 'cupflower'}, {'id': 21039, 'synset': 'whitecup.n.01', 'name': 'whitecup'}, {'id': 21040, 'synset': 'petunia.n.01', 'name': 'petunia'}, {'id': 21041, 'synset': 'large_white_petunia.n.01', 'name': 'large_white_petunia'}, {'id': 21042, 'synset': 'violet-flowered_petunia.n.01', 'name': 'violet-flowered_petunia'}, {'id': 21043, 'synset': 'hybrid_petunia.n.01', 'name': 'hybrid_petunia'}, {'id': 21044, 'synset': 'cape_gooseberry.n.01', 'name': 'cape_gooseberry'}, {'id': 21045, 'synset': 'strawberry_tomato.n.01', 'name': 'strawberry_tomato'}, {'id': 
21046, 'synset': 'tomatillo.n.02', 'name': 'tomatillo'}, {'id': 21047, 'synset': 'tomatillo.n.01', 'name': 'tomatillo'}, {'id': 21048, 'synset': 'yellow_henbane.n.01', 'name': 'yellow_henbane'}, {'id': 21049, 'synset': "cock's_eggs.n.01", 'name': "cock's_eggs"}, {'id': 21050, 'synset': 'salpiglossis.n.01', 'name': 'salpiglossis'}, {'id': 21051, 'synset': 'painted_tongue.n.01', 'name': 'painted_tongue'}, {'id': 21052, 'synset': 'butterfly_flower.n.01', 'name': 'butterfly_flower'}, {'id': 21053, 'synset': 'scopolia_carniolica.n.01', 'name': 'Scopolia_carniolica'}, {'id': 21054, 'synset': 'chalice_vine.n.01', 'name': 'chalice_vine'}, {'id': 21055, 'synset': 'verbena.n.01', 'name': 'verbena'}, {'id': 21056, 'synset': 'lantana.n.01', 'name': 'lantana'}, {'id': 21057, 'synset': 'black_mangrove.n.02', 'name': 'black_mangrove'}, {'id': 21058, 'synset': 'white_mangrove.n.01', 'name': 'white_mangrove'}, {'id': 21059, 'synset': 'black_mangrove.n.01', 'name': 'black_mangrove'}, {'id': 21060, 'synset': 'teak.n.02', 'name': 'teak'}, {'id': 21061, 'synset': 'spurge.n.01', 'name': 'spurge'}, {'id': 21062, 'synset': 'sun_spurge.n.01', 'name': 'sun_spurge'}, {'id': 21063, 'synset': 'petty_spurge.n.01', 'name': 'petty_spurge'}, {'id': 21064, 'synset': "medusa's_head.n.01", 'name': "medusa's_head"}, {'id': 21065, 'synset': 'wild_spurge.n.01', 'name': 'wild_spurge'}, {'id': 21066, 'synset': 'snow-on-the-mountain.n.01', 'name': 'snow-on-the-mountain'}, {'id': 21067, 'synset': 'cypress_spurge.n.01', 'name': 'cypress_spurge'}, {'id': 21068, 'synset': 'leafy_spurge.n.01', 'name': 'leafy_spurge'}, {'id': 21069, 'synset': 'hairy_spurge.n.01', 'name': 'hairy_spurge'}, {'id': 21070, 'synset': 'poinsettia.n.01', 'name': 'poinsettia'}, {'id': 21071, 'synset': 'japanese_poinsettia.n.01', 'name': 'Japanese_poinsettia'}, {'id': 21072, 'synset': 'fire-on-the-mountain.n.01', 'name': 'fire-on-the-mountain'}, {'id': 21073, 'synset': 'wood_spurge.n.01', 'name': 'wood_spurge'}, {'id': 21074, 'synset': 'dwarf_spurge.n.01', 'name': 'dwarf_spurge'}, {'id': 21075, 'synset': 'scarlet_plume.n.01', 'name': 'scarlet_plume'}, {'id': 21076, 'synset': 'naboom.n.01', 'name': 'naboom'}, {'id': 21077, 'synset': 'crown_of_thorns.n.02', 'name': 'crown_of_thorns'}, {'id': 21078, 'synset': 'toothed_spurge.n.01', 'name': 'toothed_spurge'}, {'id': 21079, 'synset': 'three-seeded_mercury.n.01', 'name': 'three-seeded_mercury'}, {'id': 21080, 'synset': 'croton.n.02', 'name': 'croton'}, {'id': 21081, 'synset': 'cascarilla.n.01', 'name': 'cascarilla'}, {'id': 21082, 'synset': 'cascarilla_bark.n.01', 'name': 'cascarilla_bark'}, {'id': 21083, 'synset': 'castor-oil_plant.n.01', 'name': 'castor-oil_plant'}, {'id': 21084, 'synset': 'spurge_nettle.n.01', 'name': 'spurge_nettle'}, {'id': 21085, 'synset': 'physic_nut.n.01', 'name': 'physic_nut'}, {'id': 21086, 'synset': 'para_rubber_tree.n.01', 'name': 'Para_rubber_tree'}, {'id': 21087, 'synset': 'cassava.n.03', 'name': 'cassava'}, {'id': 21088, 'synset': 'bitter_cassava.n.01', 'name': 'bitter_cassava'}, {'id': 21089, 'synset': 'cassava.n.02', 'name': 'cassava'}, {'id': 21090, 'synset': 'sweet_cassava.n.01', 'name': 'sweet_cassava'}, {'id': 21091, 'synset': 'candlenut.n.01', 'name': 'candlenut'}, {'id': 21092, 'synset': 'tung_tree.n.01', 'name': 'tung_tree'}, {'id': 21093, 'synset': 'slipper_spurge.n.01', 'name': 'slipper_spurge'}, {'id': 21094, 'synset': 'candelilla.n.01', 'name': 'candelilla'}, {'id': 21095, 'synset': 'jewbush.n.01', 'name': 'Jewbush'}, {'id': 21096, 'synset': 'jumping_bean.n.01', 'name': 
'jumping_bean'}, {'id': 21097, 'synset': 'camellia.n.01', 'name': 'camellia'}, {'id': 21098, 'synset': 'japonica.n.01', 'name': 'japonica'}, {'id': 21099, 'synset': 'umbellifer.n.01', 'name': 'umbellifer'}, {'id': 21100, 'synset': 'wild_parsley.n.01', 'name': 'wild_parsley'}, {'id': 21101, 'synset': "fool's_parsley.n.01", 'name': "fool's_parsley"}, {'id': 21102, 'synset': 'dill.n.01', 'name': 'dill'}, {'id': 21103, 'synset': 'angelica.n.01', 'name': 'angelica'}, {'id': 21104, 'synset': 'garden_angelica.n.01', 'name': 'garden_angelica'}, {'id': 21105, 'synset': 'wild_angelica.n.01', 'name': 'wild_angelica'}, {'id': 21106, 'synset': 'chervil.n.01', 'name': 'chervil'}, {'id': 21107, 'synset': 'cow_parsley.n.01', 'name': 'cow_parsley'}, {'id': 21108, 'synset': 'wild_celery.n.01', 'name': 'wild_celery'}, {'id': 21109, 'synset': 'astrantia.n.01', 'name': 'astrantia'}, {'id': 21110, 'synset': 'greater_masterwort.n.01', 'name': 'greater_masterwort'}, {'id': 21111, 'synset': 'caraway.n.01', 'name': 'caraway'}, {'id': 21112, 'synset': 'whorled_caraway.n.01', 'name': 'whorled_caraway'}, {'id': 21113, 'synset': 'water_hemlock.n.01', 'name': 'water_hemlock'}, {'id': 21114, 'synset': 'spotted_cowbane.n.01', 'name': 'spotted_cowbane'}, {'id': 21115, 'synset': 'hemlock.n.02', 'name': 'hemlock'}, {'id': 21116, 'synset': 'earthnut.n.02', 'name': 'earthnut'}, {'id': 21117, 'synset': 'cumin.n.01', 'name': 'cumin'}, {'id': 21118, 'synset': 'wild_carrot.n.01', 'name': 'wild_carrot'}, {'id': 21119, 'synset': 'eryngo.n.01', 'name': 'eryngo'}, {'id': 21120, 'synset': 'sea_holly.n.01', 'name': 'sea_holly'}, {'id': 21121, 'synset': 'button_snakeroot.n.02', 'name': 'button_snakeroot'}, {'id': 21122, 'synset': 'rattlesnake_master.n.01', 'name': 'rattlesnake_master'}, {'id': 21123, 'synset': 'fennel.n.01', 'name': 'fennel'}, {'id': 21124, 'synset': 'common_fennel.n.01', 'name': 'common_fennel'}, {'id': 21125, 'synset': 'florence_fennel.n.01', 'name': 'Florence_fennel'}, {'id': 21126, 'synset': 'cow_parsnip.n.01', 'name': 'cow_parsnip'}, {'id': 21127, 'synset': 'lovage.n.01', 'name': 'lovage'}, {'id': 21128, 'synset': 'sweet_cicely.n.01', 'name': 'sweet_cicely'}, {'id': 21129, 'synset': 'water_fennel.n.01', 'name': 'water_fennel'}, {'id': 21130, 'synset': 'parsnip.n.02', 'name': 'parsnip'}, {'id': 21131, 'synset': 'cultivated_parsnip.n.01', 'name': 'cultivated_parsnip'}, {'id': 21132, 'synset': 'wild_parsnip.n.01', 'name': 'wild_parsnip'}, {'id': 21133, 'synset': 'parsley.n.01', 'name': 'parsley'}, {'id': 21134, 'synset': 'italian_parsley.n.01', 'name': 'Italian_parsley'}, {'id': 21135, 'synset': 'hamburg_parsley.n.01', 'name': 'Hamburg_parsley'}, {'id': 21136, 'synset': 'anise.n.01', 'name': 'anise'}, {'id': 21137, 'synset': 'sanicle.n.01', 'name': 'sanicle'}, {'id': 21138, 'synset': 'purple_sanicle.n.01', 'name': 'purple_sanicle'}, {'id': 21139, 'synset': 'european_sanicle.n.01', 'name': 'European_sanicle'}, {'id': 21140, 'synset': 'water_parsnip.n.01', 'name': 'water_parsnip'}, {'id': 21141, 'synset': 'greater_water_parsnip.n.01', 'name': 'greater_water_parsnip'}, {'id': 21142, 'synset': 'skirret.n.01', 'name': 'skirret'}, {'id': 21143, 'synset': 'dogwood.n.01', 'name': 'dogwood'}, {'id': 21144, 'synset': 'common_white_dogwood.n.01', 'name': 'common_white_dogwood'}, {'id': 21145, 'synset': 'red_osier.n.01', 'name': 'red_osier'}, {'id': 21146, 'synset': 'silky_dogwood.n.02', 'name': 'silky_dogwood'}, {'id': 21147, 'synset': 'silky_cornel.n.01', 'name': 'silky_cornel'}, {'id': 21148, 'synset': 
'common_european_dogwood.n.01', 'name': 'common_European_dogwood'}, {'id': 21149, 'synset': 'bunchberry.n.01', 'name': 'bunchberry'}, {'id': 21150, 'synset': 'cornelian_cherry.n.01', 'name': 'cornelian_cherry'}, {'id': 21151, 'synset': 'puka.n.01', 'name': 'puka'}, {'id': 21152, 'synset': 'kapuka.n.01', 'name': 'kapuka'}, {'id': 21153, 'synset': 'valerian.n.01', 'name': 'valerian'}, {'id': 21154, 'synset': 'common_valerian.n.01', 'name': 'common_valerian'}, {'id': 21155, 'synset': 'common_corn_salad.n.01', 'name': 'common_corn_salad'}, {'id': 21156, 'synset': 'red_valerian.n.01', 'name': 'red_valerian'}, {'id': 21157, 'synset': 'filmy_fern.n.02', 'name': 'filmy_fern'}, {'id': 21158, 'synset': 'bristle_fern.n.01', 'name': 'bristle_fern'}, {'id': 21159, 'synset': "hare's-foot_bristle_fern.n.01", 'name': "hare's-foot_bristle_fern"}, {'id': 21160, 'synset': 'killarney_fern.n.01', 'name': 'Killarney_fern'}, {'id': 21161, 'synset': 'kidney_fern.n.01', 'name': 'kidney_fern'}, {'id': 21162, 'synset': 'flowering_fern.n.02', 'name': 'flowering_fern'}, {'id': 21163, 'synset': 'royal_fern.n.01', 'name': 'royal_fern'}, {'id': 21164, 'synset': 'interrupted_fern.n.01', 'name': 'interrupted_fern'}, {'id': 21165, 'synset': 'crape_fern.n.01', 'name': 'crape_fern'}, {'id': 21166, 'synset': 'crepe_fern.n.01', 'name': 'crepe_fern'}, {'id': 21167, 'synset': 'curly_grass.n.01', 'name': 'curly_grass'}, {'id': 21168, 'synset': 'pine_fern.n.01', 'name': 'pine_fern'}, {'id': 21169, 'synset': 'climbing_fern.n.01', 'name': 'climbing_fern'}, {'id': 21170, 'synset': 'creeping_fern.n.01', 'name': 'creeping_fern'}, {'id': 21171, 'synset': 'climbing_maidenhair.n.01', 'name': 'climbing_maidenhair'}, {'id': 21172, 'synset': 'scented_fern.n.02', 'name': 'scented_fern'}, {'id': 21173, 'synset': 'clover_fern.n.01', 'name': 'clover_fern'}, {'id': 21174, 'synset': 'nardoo.n.01', 'name': 'nardoo'}, {'id': 21175, 'synset': 'water_clover.n.01', 'name': 'water_clover'}, {'id': 21176, 'synset': 'pillwort.n.01', 'name': 'pillwort'}, {'id': 21177, 'synset': 'regnellidium.n.01', 'name': 'regnellidium'}, {'id': 21178, 'synset': 'floating-moss.n.01', 'name': 'floating-moss'}, {'id': 21179, 'synset': 'mosquito_fern.n.01', 'name': 'mosquito_fern'}, {'id': 21180, 'synset': "adder's_tongue.n.01", 'name': "adder's_tongue"}, {'id': 21181, 'synset': 'ribbon_fern.n.03', 'name': 'ribbon_fern'}, {'id': 21182, 'synset': 'grape_fern.n.01', 'name': 'grape_fern'}, {'id': 21183, 'synset': 'daisyleaf_grape_fern.n.01', 'name': 'daisyleaf_grape_fern'}, {'id': 21184, 'synset': 'leathery_grape_fern.n.01', 'name': 'leathery_grape_fern'}, {'id': 21185, 'synset': 'rattlesnake_fern.n.01', 'name': 'rattlesnake_fern'}, {'id': 21186, 'synset': 'flowering_fern.n.01', 'name': 'flowering_fern'}, {'id': 21187, 'synset': 'powdery_mildew.n.01', 'name': 'powdery_mildew'}, {'id': 21188, 'synset': 'dutch_elm_fungus.n.01', 'name': 'Dutch_elm_fungus'}, {'id': 21189, 'synset': 'ergot.n.02', 'name': 'ergot'}, {'id': 21190, 'synset': 'rye_ergot.n.01', 'name': 'rye_ergot'}, {'id': 21191, 'synset': 'black_root_rot_fungus.n.01', 'name': 'black_root_rot_fungus'}, {'id': 21192, 'synset': "dead-man's-fingers.n.01", 'name': "dead-man's-fingers"}, {'id': 21193, 'synset': 'sclerotinia.n.01', 'name': 'sclerotinia'}, {'id': 21194, 'synset': 'brown_cup.n.01', 'name': 'brown_cup'}, {'id': 21195, 'synset': 'earthball.n.01', 'name': 'earthball'}, {'id': 21196, 'synset': 'scleroderma_citrinum.n.01', 'name': 'Scleroderma_citrinum'}, {'id': 21197, 'synset': 'scleroderma_flavidium.n.01', 'name': 
'Scleroderma_flavidium'}, {'id': 21198, 'synset': 'scleroderma_bovista.n.01', 'name': 'Scleroderma_bovista'}, {'id': 21199, 'synset': 'podaxaceae.n.01', 'name': 'Podaxaceae'}, {'id': 21200, 'synset': 'stalked_puffball.n.02', 'name': 'stalked_puffball'}, {'id': 21201, 'synset': 'stalked_puffball.n.01', 'name': 'stalked_puffball'}, {'id': 21202, 'synset': 'false_truffle.n.01', 'name': 'false_truffle'}, {'id': 21203, 'synset': 'rhizopogon_idahoensis.n.01', 'name': 'Rhizopogon_idahoensis'}, {'id': 21204, 'synset': 'truncocolumella_citrina.n.01', 'name': 'Truncocolumella_citrina'}, {'id': 21205, 'synset': 'mucor.n.01', 'name': 'mucor'}, {'id': 21206, 'synset': 'rhizopus.n.01', 'name': 'rhizopus'}, {'id': 21207, 'synset': 'bread_mold.n.01', 'name': 'bread_mold'}, {'id': 21208, 'synset': 'slime_mold.n.01', 'name': 'slime_mold'}, {'id': 21209, 'synset': 'true_slime_mold.n.01', 'name': 'true_slime_mold'}, {'id': 21210, 'synset': 'cellular_slime_mold.n.01', 'name': 'cellular_slime_mold'}, {'id': 21211, 'synset': 'dictostylium.n.01', 'name': 'dictostylium'}, {'id': 21212, 'synset': 'pond-scum_parasite.n.01', 'name': 'pond-scum_parasite'}, {'id': 21213, 'synset': 'potato_wart_fungus.n.01', 'name': 'potato_wart_fungus'}, {'id': 21214, 'synset': 'white_fungus.n.01', 'name': 'white_fungus'}, {'id': 21215, 'synset': 'water_mold.n.01', 'name': 'water_mold'}, {'id': 21216, 'synset': 'downy_mildew.n.01', 'name': 'downy_mildew'}, {'id': 21217, 'synset': 'blue_mold_fungus.n.01', 'name': 'blue_mold_fungus'}, {'id': 21218, 'synset': 'onion_mildew.n.01', 'name': 'onion_mildew'}, {'id': 21219, 'synset': 'tobacco_mildew.n.01', 'name': 'tobacco_mildew'}, {'id': 21220, 'synset': 'white_rust.n.01', 'name': 'white_rust'}, {'id': 21221, 'synset': 'pythium.n.01', 'name': 'pythium'}, {'id': 21222, 'synset': 'damping_off_fungus.n.01', 'name': 'damping_off_fungus'}, {'id': 21223, 'synset': 'phytophthora_citrophthora.n.01', 'name': 'Phytophthora_citrophthora'}, {'id': 21224, 'synset': 'phytophthora_infestans.n.01', 'name': 'Phytophthora_infestans'}, {'id': 21225, 'synset': 'clubroot_fungus.n.01', 'name': 'clubroot_fungus'}, {'id': 21226, 'synset': 'geglossaceae.n.01', 'name': 'Geglossaceae'}, {'id': 21227, 'synset': 'sarcosomataceae.n.01', 'name': 'Sarcosomataceae'}, {'id': 21228, 'synset': 'rufous_rubber_cup.n.01', 'name': 'Rufous_rubber_cup'}, {'id': 21229, 'synset': "devil's_cigar.n.01", 'name': "devil's_cigar"}, {'id': 21230, 'synset': "devil's_urn.n.01", 'name': "devil's_urn"}, {'id': 21231, 'synset': 'truffle.n.01', 'name': 'truffle'}, {'id': 21232, 'synset': 'club_fungus.n.01', 'name': 'club_fungus'}, {'id': 21233, 'synset': 'coral_fungus.n.01', 'name': 'coral_fungus'}, {'id': 21234, 'synset': 'tooth_fungus.n.01', 'name': 'tooth_fungus'}, {'id': 21235, 'synset': 'lichen.n.02', 'name': 'lichen'}, {'id': 21236, 'synset': 'ascolichen.n.01', 'name': 'ascolichen'}, {'id': 21237, 'synset': 'basidiolichen.n.01', 'name': 'basidiolichen'}, {'id': 21238, 'synset': 'lecanora.n.01', 'name': 'lecanora'}, {'id': 21239, 'synset': 'manna_lichen.n.01', 'name': 'manna_lichen'}, {'id': 21240, 'synset': 'archil.n.02', 'name': 'archil'}, {'id': 21241, 'synset': 'roccella.n.01', 'name': 'roccella'}, {'id': 21242, 'synset': 'beard_lichen.n.01', 'name': 'beard_lichen'}, {'id': 21243, 'synset': 'horsehair_lichen.n.01', 'name': 'horsehair_lichen'}, {'id': 21244, 'synset': 'reindeer_moss.n.01', 'name': 'reindeer_moss'}, {'id': 21245, 'synset': 'crottle.n.01', 'name': 'crottle'}, {'id': 21246, 'synset': 'iceland_moss.n.01', 'name': 
'Iceland_moss'}, {'id': 21247, 'synset': 'fungus.n.01', 'name': 'fungus'}, {'id': 21248, 'synset': 'promycelium.n.01', 'name': 'promycelium'}, {'id': 21249, 'synset': 'true_fungus.n.01', 'name': 'true_fungus'}, {'id': 21250, 'synset': 'basidiomycete.n.01', 'name': 'basidiomycete'}, {'id': 21251, 'synset': 'mushroom.n.03', 'name': 'mushroom'}, {'id': 21252, 'synset': 'agaric.n.02', 'name': 'agaric'}, {'id': 21253, 'synset': 'mushroom.n.01', 'name': 'mushroom'}, {'id': 21254, 'synset': 'toadstool.n.01', 'name': 'toadstool'}, {'id': 21255, 'synset': 'horse_mushroom.n.01', 'name': 'horse_mushroom'}, {'id': 21256, 'synset': 'meadow_mushroom.n.01', 'name': 'meadow_mushroom'}, {'id': 21257, 'synset': 'shiitake.n.01', 'name': 'shiitake'}, {'id': 21258, 'synset': 'scaly_lentinus.n.01', 'name': 'scaly_lentinus'}, {'id': 21259, 'synset': 'royal_agaric.n.01', 'name': 'royal_agaric'}, {'id': 21260, 'synset': 'false_deathcap.n.01', 'name': 'false_deathcap'}, {'id': 21261, 'synset': 'fly_agaric.n.01', 'name': 'fly_agaric'}, {'id': 21262, 'synset': 'death_cap.n.01', 'name': 'death_cap'}, {'id': 21263, 'synset': 'blushing_mushroom.n.01', 'name': 'blushing_mushroom'}, {'id': 21264, 'synset': 'destroying_angel.n.01', 'name': 'destroying_angel'}, {'id': 21265, 'synset': 'chanterelle.n.01', 'name': 'chanterelle'}, {'id': 21266, 'synset': 'floccose_chanterelle.n.01', 'name': 'floccose_chanterelle'}, {'id': 21267, 'synset': "pig's_ears.n.01", 'name': "pig's_ears"}, {'id': 21268, 'synset': 'cinnabar_chanterelle.n.01', 'name': 'cinnabar_chanterelle'}, {'id': 21269, 'synset': 'jack-o-lantern_fungus.n.01', 'name': 'jack-o-lantern_fungus'}, {'id': 21270, 'synset': 'inky_cap.n.01', 'name': 'inky_cap'}, {'id': 21271, 'synset': 'shaggymane.n.01', 'name': 'shaggymane'}, {'id': 21272, 'synset': 'milkcap.n.01', 'name': 'milkcap'}, {'id': 21273, 'synset': 'fairy-ring_mushroom.n.01', 'name': 'fairy-ring_mushroom'}, {'id': 21274, 'synset': 'fairy_ring.n.01', 'name': 'fairy_ring'}, {'id': 21275, 'synset': 'oyster_mushroom.n.01', 'name': 'oyster_mushroom'}, {'id': 21276, 'synset': 'olive-tree_agaric.n.01', 'name': 'olive-tree_agaric'}, {'id': 21277, 'synset': 'pholiota_astragalina.n.01', 'name': 'Pholiota_astragalina'}, {'id': 21278, 'synset': 'pholiota_aurea.n.01', 'name': 'Pholiota_aurea'}, {'id': 21279, 'synset': 'pholiota_destruens.n.01', 'name': 'Pholiota_destruens'}, {'id': 21280, 'synset': 'pholiota_flammans.n.01', 'name': 'Pholiota_flammans'}, {'id': 21281, 'synset': 'pholiota_flavida.n.01', 'name': 'Pholiota_flavida'}, {'id': 21282, 'synset': 'nameko.n.01', 'name': 'nameko'}, {'id': 21283, 'synset': 'pholiota_squarrosa-adiposa.n.01', 'name': 'Pholiota_squarrosa-adiposa'}, {'id': 21284, 'synset': 'pholiota_squarrosa.n.01', 'name': 'Pholiota_squarrosa'}, {'id': 21285, 'synset': 'pholiota_squarrosoides.n.01', 'name': 'Pholiota_squarrosoides'}, {'id': 21286, 'synset': 'stropharia_ambigua.n.01', 'name': 'Stropharia_ambigua'}, {'id': 21287, 'synset': 'stropharia_hornemannii.n.01', 'name': 'Stropharia_hornemannii'}, {'id': 21288, 'synset': 'stropharia_rugoso-annulata.n.01', 'name': 'Stropharia_rugoso-annulata'}, {'id': 21289, 'synset': 'gill_fungus.n.01', 'name': 'gill_fungus'}, {'id': 21290, 'synset': 'entoloma_lividum.n.01', 'name': 'Entoloma_lividum'}, {'id': 21291, 'synset': 'entoloma_aprile.n.01', 'name': 'Entoloma_aprile'}, {'id': 21292, 'synset': 'chlorophyllum_molybdites.n.01', 'name': 'Chlorophyllum_molybdites'}, {'id': 21293, 'synset': 'lepiota.n.01', 'name': 'lepiota'}, {'id': 21294, 'synset': 
'parasol_mushroom.n.01', 'name': 'parasol_mushroom'}, {'id': 21295, 'synset': 'poisonous_parasol.n.01', 'name': 'poisonous_parasol'}, {'id': 21296, 'synset': 'lepiota_naucina.n.01', 'name': 'Lepiota_naucina'}, {'id': 21297, 'synset': 'lepiota_rhacodes.n.01', 'name': 'Lepiota_rhacodes'}, {'id': 21298, 'synset': 'american_parasol.n.01', 'name': 'American_parasol'}, {'id': 21299, 'synset': 'lepiota_rubrotincta.n.01', 'name': 'Lepiota_rubrotincta'}, {'id': 21300, 'synset': 'lepiota_clypeolaria.n.01', 'name': 'Lepiota_clypeolaria'}, {'id': 21301, 'synset': 'onion_stem.n.01', 'name': 'onion_stem'}, {'id': 21302, 'synset': 'pink_disease_fungus.n.01', 'name': 'pink_disease_fungus'}, {'id': 21303, 'synset': 'bottom_rot_fungus.n.01', 'name': 'bottom_rot_fungus'}, {'id': 21304, 'synset': 'potato_fungus.n.01', 'name': 'potato_fungus'}, {'id': 21305, 'synset': 'coffee_fungus.n.01', 'name': 'coffee_fungus'}, {'id': 21306, 'synset': 'blewits.n.01', 'name': 'blewits'}, {'id': 21307, 'synset': 'sandy_mushroom.n.01', 'name': 'sandy_mushroom'}, {'id': 21308, 'synset': 'tricholoma_pessundatum.n.01', 'name': 'Tricholoma_pessundatum'}, {'id': 21309, 'synset': 'tricholoma_sejunctum.n.01', 'name': 'Tricholoma_sejunctum'}, {'id': 21310, 'synset': 'man-on-a-horse.n.01', 'name': 'man-on-a-horse'}, {'id': 21311, 'synset': 'tricholoma_venenata.n.01', 'name': 'Tricholoma_venenata'}, {'id': 21312, 'synset': 'tricholoma_pardinum.n.01', 'name': 'Tricholoma_pardinum'}, {'id': 21313, 'synset': 'tricholoma_vaccinum.n.01', 'name': 'Tricholoma_vaccinum'}, {'id': 21314, 'synset': 'tricholoma_aurantium.n.01', 'name': 'Tricholoma_aurantium'}, {'id': 21315, 'synset': 'volvaria_bombycina.n.01', 'name': 'Volvaria_bombycina'}, {'id': 21316, 'synset': 'pluteus_aurantiorugosus.n.01', 'name': 'Pluteus_aurantiorugosus'}, {'id': 21317, 'synset': 'pluteus_magnus.n.01', 'name': 'Pluteus_magnus'}, {'id': 21318, 'synset': 'deer_mushroom.n.01', 'name': 'deer_mushroom'}, {'id': 21319, 'synset': 'straw_mushroom.n.01', 'name': 'straw_mushroom'}, {'id': 21320, 'synset': 'volvariella_bombycina.n.01', 'name': 'Volvariella_bombycina'}, {'id': 21321, 'synset': 'clitocybe_clavipes.n.01', 'name': 'Clitocybe_clavipes'}, {'id': 21322, 'synset': 'clitocybe_dealbata.n.01', 'name': 'Clitocybe_dealbata'}, {'id': 21323, 'synset': 'clitocybe_inornata.n.01', 'name': 'Clitocybe_inornata'}, {'id': 21324, 'synset': 'clitocybe_robusta.n.01', 'name': 'Clitocybe_robusta'}, {'id': 21325, 'synset': 'clitocybe_irina.n.01', 'name': 'Clitocybe_irina'}, {'id': 21326, 'synset': 'clitocybe_subconnexa.n.01', 'name': 'Clitocybe_subconnexa'}, {'id': 21327, 'synset': 'winter_mushroom.n.01', 'name': 'winter_mushroom'}, {'id': 21328, 'synset': 'mycelium.n.01', 'name': 'mycelium'}, {'id': 21329, 'synset': 'sclerotium.n.02', 'name': 'sclerotium'}, {'id': 21330, 'synset': 'sac_fungus.n.01', 'name': 'sac_fungus'}, {'id': 21331, 'synset': 'ascomycete.n.01', 'name': 'ascomycete'}, {'id': 21332, 'synset': 'clavicipitaceae.n.01', 'name': 'Clavicipitaceae'}, {'id': 21333, 'synset': 'grainy_club.n.01', 'name': 'grainy_club'}, {'id': 21334, 'synset': 'yeast.n.02', 'name': 'yeast'}, {'id': 21335, 'synset': "baker's_yeast.n.01", 'name': "baker's_yeast"}, {'id': 21336, 'synset': "wine-maker's_yeast.n.01", 'name': "wine-maker's_yeast"}, {'id': 21337, 'synset': 'aspergillus_fumigatus.n.01', 'name': 'Aspergillus_fumigatus'}, {'id': 21338, 'synset': 'brown_root_rot_fungus.n.01', 'name': 'brown_root_rot_fungus'}, {'id': 21339, 'synset': 'discomycete.n.01', 'name': 'discomycete'}, {'id': 21340, 
'synset': 'leotia_lubrica.n.01', 'name': 'Leotia_lubrica'}, {'id': 21341, 'synset': 'mitrula_elegans.n.01', 'name': 'Mitrula_elegans'}, {'id': 21342, 'synset': 'sarcoscypha_coccinea.n.01', 'name': 'Sarcoscypha_coccinea'}, {'id': 21343, 'synset': 'caloscypha_fulgens.n.01', 'name': 'Caloscypha_fulgens'}, {'id': 21344, 'synset': 'aleuria_aurantia.n.01', 'name': 'Aleuria_aurantia'}, {'id': 21345, 'synset': 'elf_cup.n.01', 'name': 'elf_cup'}, {'id': 21346, 'synset': 'peziza_domicilina.n.01', 'name': 'Peziza_domicilina'}, {'id': 21347, 'synset': 'blood_cup.n.01', 'name': 'blood_cup'}, {'id': 21348, 'synset': 'urnula_craterium.n.01', 'name': 'Urnula_craterium'}, {'id': 21349, 'synset': 'galiella_rufa.n.01', 'name': 'Galiella_rufa'}, {'id': 21350, 'synset': 'jafnea_semitosta.n.01', 'name': 'Jafnea_semitosta'}, {'id': 21351, 'synset': 'morel.n.01', 'name': 'morel'}, {'id': 21352, 'synset': 'common_morel.n.01', 'name': 'common_morel'}, {'id': 21353, 'synset': 'disciotis_venosa.n.01', 'name': 'Disciotis_venosa'}, {'id': 21354, 'synset': 'verpa.n.01', 'name': 'Verpa'}, {'id': 21355, 'synset': 'verpa_bohemica.n.01', 'name': 'Verpa_bohemica'}, {'id': 21356, 'synset': 'verpa_conica.n.01', 'name': 'Verpa_conica'}, {'id': 21357, 'synset': 'black_morel.n.01', 'name': 'black_morel'}, {'id': 21358, 'synset': 'morchella_crassipes.n.01', 'name': 'Morchella_crassipes'}, {'id': 21359, 'synset': 'morchella_semilibera.n.01', 'name': 'Morchella_semilibera'}, {'id': 21360, 'synset': 'wynnea_americana.n.01', 'name': 'Wynnea_americana'}, {'id': 21361, 'synset': 'wynnea_sparassoides.n.01', 'name': 'Wynnea_sparassoides'}, {'id': 21362, 'synset': 'false_morel.n.01', 'name': 'false_morel'}, {'id': 21363, 'synset': 'lorchel.n.01', 'name': 'lorchel'}, {'id': 21364, 'synset': 'helvella.n.01', 'name': 'helvella'}, {'id': 21365, 'synset': 'helvella_crispa.n.01', 'name': 'Helvella_crispa'}, {'id': 21366, 'synset': 'helvella_acetabulum.n.01', 'name': 'Helvella_acetabulum'}, {'id': 21367, 'synset': 'helvella_sulcata.n.01', 'name': 'Helvella_sulcata'}, {'id': 21368, 'synset': 'discina.n.01', 'name': 'discina'}, {'id': 21369, 'synset': 'gyromitra.n.01', 'name': 'gyromitra'}, {'id': 21370, 'synset': 'gyromitra_californica.n.01', 'name': 'Gyromitra_californica'}, {'id': 21371, 'synset': 'gyromitra_sphaerospora.n.01', 'name': 'Gyromitra_sphaerospora'}, {'id': 21372, 'synset': 'gyromitra_esculenta.n.01', 'name': 'Gyromitra_esculenta'}, {'id': 21373, 'synset': 'gyromitra_infula.n.01', 'name': 'Gyromitra_infula'}, {'id': 21374, 'synset': 'gyromitra_fastigiata.n.01', 'name': 'Gyromitra_fastigiata'}, {'id': 21375, 'synset': 'gyromitra_gigas.n.01', 'name': 'Gyromitra_gigas'}, {'id': 21376, 'synset': 'gasteromycete.n.01', 'name': 'gasteromycete'}, {'id': 21377, 'synset': 'stinkhorn.n.01', 'name': 'stinkhorn'}, {'id': 21378, 'synset': 'common_stinkhorn.n.01', 'name': 'common_stinkhorn'}, {'id': 21379, 'synset': 'phallus_ravenelii.n.01', 'name': 'Phallus_ravenelii'}, {'id': 21380, 'synset': 'dog_stinkhorn.n.01', 'name': 'dog_stinkhorn'}, {'id': 21381, 'synset': 'calostoma_lutescens.n.01', 'name': 'Calostoma_lutescens'}, {'id': 21382, 'synset': 'calostoma_cinnabarina.n.01', 'name': 'Calostoma_cinnabarina'}, {'id': 21383, 'synset': 'calostoma_ravenelii.n.01', 'name': 'Calostoma_ravenelii'}, {'id': 21384, 'synset': 'stinky_squid.n.01', 'name': 'stinky_squid'}, {'id': 21385, 'synset': 'puffball.n.01', 'name': 'puffball'}, {'id': 21386, 'synset': 'giant_puffball.n.01', 'name': 'giant_puffball'}, {'id': 21387, 'synset': 'earthstar.n.01', 'name': 
'earthstar'}, {'id': 21388, 'synset': 'geastrum_coronatum.n.01', 'name': 'Geastrum_coronatum'}, {'id': 21389, 'synset': 'radiigera_fuscogleba.n.01', 'name': 'Radiigera_fuscogleba'}, {'id': 21390, 'synset': 'astreus_pteridis.n.01', 'name': 'Astreus_pteridis'}, {'id': 21391, 'synset': 'astreus_hygrometricus.n.01', 'name': 'Astreus_hygrometricus'}, {'id': 21392, 'synset': "bird's-nest_fungus.n.01", 'name': "bird's-nest_fungus"}, {'id': 21393, 'synset': 'gastrocybe_lateritia.n.01', 'name': 'Gastrocybe_lateritia'}, {'id': 21394, 'synset': 'macowanites_americanus.n.01', 'name': 'Macowanites_americanus'}, {'id': 21395, 'synset': 'polypore.n.01', 'name': 'polypore'}, {'id': 21396, 'synset': 'bracket_fungus.n.01', 'name': 'bracket_fungus'}, {'id': 21397, 'synset': 'albatrellus_dispansus.n.01', 'name': 'Albatrellus_dispansus'}, {'id': 21398, 'synset': 'albatrellus_ovinus.n.01', 'name': 'Albatrellus_ovinus'}, {'id': 21399, 'synset': 'neolentinus_ponderosus.n.01', 'name': 'Neolentinus_ponderosus'}, {'id': 21400, 'synset': 'oligoporus_leucospongia.n.01', 'name': 'Oligoporus_leucospongia'}, {'id': 21401, 'synset': 'polyporus_tenuiculus.n.01', 'name': 'Polyporus_tenuiculus'}, {'id': 21402, 'synset': 'hen-of-the-woods.n.01', 'name': 'hen-of-the-woods'}, {'id': 21403, 'synset': 'polyporus_squamosus.n.01', 'name': 'Polyporus_squamosus'}, {'id': 21404, 'synset': 'beefsteak_fungus.n.01', 'name': 'beefsteak_fungus'}, {'id': 21405, 'synset': 'agaric.n.01', 'name': 'agaric'}, {'id': 21406, 'synset': 'bolete.n.01', 'name': 'bolete'}, {'id': 21407, 'synset': 'boletus_chrysenteron.n.01', 'name': 'Boletus_chrysenteron'}, {'id': 21408, 'synset': 'boletus_edulis.n.01', 'name': 'Boletus_edulis'}, {'id': 21409, 'synset': "frost's_bolete.n.01", 'name': "Frost's_bolete"}, {'id': 21410, 'synset': 'boletus_luridus.n.01', 'name': 'Boletus_luridus'}, {'id': 21411, 'synset': 'boletus_mirabilis.n.01', 'name': 'Boletus_mirabilis'}, {'id': 21412, 'synset': 'boletus_pallidus.n.01', 'name': 'Boletus_pallidus'}, {'id': 21413, 'synset': 'boletus_pulcherrimus.n.01', 'name': 'Boletus_pulcherrimus'}, {'id': 21414, 'synset': 'boletus_pulverulentus.n.01', 'name': 'Boletus_pulverulentus'}, {'id': 21415, 'synset': 'boletus_roxanae.n.01', 'name': 'Boletus_roxanae'}, {'id': 21416, 'synset': 'boletus_subvelutipes.n.01', 'name': 'Boletus_subvelutipes'}, {'id': 21417, 'synset': 'boletus_variipes.n.01', 'name': 'Boletus_variipes'}, {'id': 21418, 'synset': 'boletus_zelleri.n.01', 'name': 'Boletus_zelleri'}, {'id': 21419, 'synset': 'fuscoboletinus_paluster.n.01', 'name': 'Fuscoboletinus_paluster'}, {'id': 21420, 'synset': 'fuscoboletinus_serotinus.n.01', 'name': 'Fuscoboletinus_serotinus'}, {'id': 21421, 'synset': 'leccinum_fibrillosum.n.01', 'name': 'Leccinum_fibrillosum'}, {'id': 21422, 'synset': 'suillus_albivelatus.n.01', 'name': 'Suillus_albivelatus'}, {'id': 21423, 'synset': 'old-man-of-the-woods.n.01', 'name': 'old-man-of-the-woods'}, {'id': 21424, 'synset': 'boletellus_russellii.n.01', 'name': 'Boletellus_russellii'}, {'id': 21425, 'synset': 'jelly_fungus.n.01', 'name': 'jelly_fungus'}, {'id': 21426, 'synset': 'snow_mushroom.n.01', 'name': 'snow_mushroom'}, {'id': 21427, 'synset': "witches'_butter.n.01", 'name': "witches'_butter"}, {'id': 21428, 'synset': 'tremella_foliacea.n.01', 'name': 'Tremella_foliacea'}, {'id': 21429, 'synset': 'tremella_reticulata.n.01', 'name': 'Tremella_reticulata'}, {'id': 21430, 'synset': "jew's-ear.n.01", 'name': "Jew's-ear"}, {'id': 21431, 'synset': 'rust.n.04', 'name': 'rust'}, {'id': 21432, 'synset': 
'aecium.n.01', 'name': 'aecium'}, {'id': 21433, 'synset': 'flax_rust.n.01', 'name': 'flax_rust'}, {'id': 21434, 'synset': 'blister_rust.n.02', 'name': 'blister_rust'}, {'id': 21435, 'synset': 'wheat_rust.n.01', 'name': 'wheat_rust'}, {'id': 21436, 'synset': 'apple_rust.n.01', 'name': 'apple_rust'}, {'id': 21437, 'synset': 'smut.n.03', 'name': 'smut'}, {'id': 21438, 'synset': 'covered_smut.n.01', 'name': 'covered_smut'}, {'id': 21439, 'synset': 'loose_smut.n.02', 'name': 'loose_smut'}, {'id': 21440, 'synset': 'cornsmut.n.01', 'name': 'cornsmut'}, {'id': 21441, 'synset': 'boil_smut.n.01', 'name': 'boil_smut'}, {'id': 21442, 'synset': 'sphacelotheca.n.01', 'name': 'Sphacelotheca'}, {'id': 21443, 'synset': 'head_smut.n.01', 'name': 'head_smut'}, {'id': 21444, 'synset': 'bunt.n.04', 'name': 'bunt'}, {'id': 21445, 'synset': 'bunt.n.03', 'name': 'bunt'}, {'id': 21446, 'synset': 'onion_smut.n.01', 'name': 'onion_smut'}, {'id': 21447, 'synset': 'flag_smut_fungus.n.01', 'name': 'flag_smut_fungus'}, {'id': 21448, 'synset': 'wheat_flag_smut.n.01', 'name': 'wheat_flag_smut'}, {'id': 21449, 'synset': 'felt_fungus.n.01', 'name': 'felt_fungus'}, {'id': 21450, 'synset': 'waxycap.n.01', 'name': 'waxycap'}, {'id': 21451, 'synset': 'hygrocybe_acutoconica.n.01', 'name': 'Hygrocybe_acutoconica'}, {'id': 21452, 'synset': 'hygrophorus_borealis.n.01', 'name': 'Hygrophorus_borealis'}, {'id': 21453, 'synset': 'hygrophorus_caeruleus.n.01', 'name': 'Hygrophorus_caeruleus'}, {'id': 21454, 'synset': 'hygrophorus_inocybiformis.n.01', 'name': 'Hygrophorus_inocybiformis'}, {'id': 21455, 'synset': 'hygrophorus_kauffmanii.n.01', 'name': 'Hygrophorus_kauffmanii'}, {'id': 21456, 'synset': 'hygrophorus_marzuolus.n.01', 'name': 'Hygrophorus_marzuolus'}, {'id': 21457, 'synset': 'hygrophorus_purpurascens.n.01', 'name': 'Hygrophorus_purpurascens'}, {'id': 21458, 'synset': 'hygrophorus_russula.n.01', 'name': 'Hygrophorus_russula'}, {'id': 21459, 'synset': 'hygrophorus_sordidus.n.01', 'name': 'Hygrophorus_sordidus'}, {'id': 21460, 'synset': 'hygrophorus_tennesseensis.n.01', 'name': 'Hygrophorus_tennesseensis'}, {'id': 21461, 'synset': 'hygrophorus_turundus.n.01', 'name': 'Hygrophorus_turundus'}, {'id': 21462, 'synset': 'neohygrophorus_angelesianus.n.01', 'name': 'Neohygrophorus_angelesianus'}, {'id': 21463, 'synset': 'cortinarius_armillatus.n.01', 'name': 'Cortinarius_armillatus'}, {'id': 21464, 'synset': 'cortinarius_atkinsonianus.n.01', 'name': 'Cortinarius_atkinsonianus'}, {'id': 21465, 'synset': 'cortinarius_corrugatus.n.01', 'name': 'Cortinarius_corrugatus'}, {'id': 21466, 'synset': 'cortinarius_gentilis.n.01', 'name': 'Cortinarius_gentilis'}, {'id': 21467, 'synset': 'cortinarius_mutabilis.n.01', 'name': 'Cortinarius_mutabilis'}, {'id': 21468, 'synset': 'cortinarius_semisanguineus.n.01', 'name': 'Cortinarius_semisanguineus'}, {'id': 21469, 'synset': 'cortinarius_subfoetidus.n.01', 'name': 'Cortinarius_subfoetidus'}, {'id': 21470, 'synset': 'cortinarius_violaceus.n.01', 'name': 'Cortinarius_violaceus'}, {'id': 21471, 'synset': 'gymnopilus_spectabilis.n.01', 'name': 'Gymnopilus_spectabilis'}, {'id': 21472, 'synset': 'gymnopilus_validipes.n.01', 'name': 'Gymnopilus_validipes'}, {'id': 21473, 'synset': 'gymnopilus_ventricosus.n.01', 'name': 'Gymnopilus_ventricosus'}, {'id': 21474, 'synset': 'mold.n.05', 'name': 'mold'}, {'id': 21475, 'synset': 'mildew.n.02', 'name': 'mildew'}, {'id': 21476, 'synset': 'verticillium.n.01', 'name': 'verticillium'}, {'id': 21477, 'synset': 'monilia.n.01', 'name': 'monilia'}, {'id': 21478, 'synset': 
'candida.n.01', 'name': 'candida'}, {'id': 21479, 'synset': 'candida_albicans.n.01', 'name': 'Candida_albicans'}, {'id': 21480, 'synset': 'blastomycete.n.01', 'name': 'blastomycete'}, {'id': 21481, 'synset': 'yellow_spot_fungus.n.01', 'name': 'yellow_spot_fungus'}, {'id': 21482, 'synset': 'green_smut_fungus.n.01', 'name': 'green_smut_fungus'}, {'id': 21483, 'synset': 'dry_rot.n.02', 'name': 'dry_rot'}, {'id': 21484, 'synset': 'rhizoctinia.n.01', 'name': 'rhizoctinia'}, {'id': 21485, 'synset': 'houseplant.n.01', 'name': 'houseplant'}, {'id': 21486, 'synset': 'bedder.n.01', 'name': 'bedder'}, {'id': 21487, 'synset': 'succulent.n.01', 'name': 'succulent'}, {'id': 21488, 'synset': 'cultivar.n.01', 'name': 'cultivar'}, {'id': 21489, 'synset': 'weed.n.01', 'name': 'weed'}, {'id': 21490, 'synset': 'wort.n.01', 'name': 'wort'}, {'id': 21491, 'synset': 'brier.n.02', 'name': 'brier'}, {'id': 21492, 'synset': 'aril.n.01', 'name': 'aril'}, {'id': 21493, 'synset': 'sporophyll.n.01', 'name': 'sporophyll'}, {'id': 21494, 'synset': 'sporangium.n.01', 'name': 'sporangium'}, {'id': 21495, 'synset': 'sporangiophore.n.01', 'name': 'sporangiophore'}, {'id': 21496, 'synset': 'ascus.n.01', 'name': 'ascus'}, {'id': 21497, 'synset': 'ascospore.n.01', 'name': 'ascospore'}, {'id': 21498, 'synset': 'arthrospore.n.02', 'name': 'arthrospore'}, {'id': 21499, 'synset': 'eusporangium.n.01', 'name': 'eusporangium'}, {'id': 21500, 'synset': 'tetrasporangium.n.01', 'name': 'tetrasporangium'}, {'id': 21501, 'synset': 'gametangium.n.01', 'name': 'gametangium'}, {'id': 21502, 'synset': 'sorus.n.02', 'name': 'sorus'}, {'id': 21503, 'synset': 'sorus.n.01', 'name': 'sorus'}, {'id': 21504, 'synset': 'partial_veil.n.01', 'name': 'partial_veil'}, {'id': 21505, 'synset': 'lignum.n.01', 'name': 'lignum'}, {'id': 21506, 'synset': 'vascular_ray.n.01', 'name': 'vascular_ray'}, {'id': 21507, 'synset': 'phloem.n.01', 'name': 'phloem'}, {'id': 21508, 'synset': 'evergreen.n.01', 'name': 'evergreen'}, {'id': 21509, 'synset': 'deciduous_plant.n.01', 'name': 'deciduous_plant'}, {'id': 21510, 'synset': 'poisonous_plant.n.01', 'name': 'poisonous_plant'}, {'id': 21511, 'synset': 'vine.n.01', 'name': 'vine'}, {'id': 21512, 'synset': 'creeper.n.01', 'name': 'creeper'}, {'id': 21513, 'synset': 'tendril.n.01', 'name': 'tendril'}, {'id': 21514, 'synset': 'root_climber.n.01', 'name': 'root_climber'}, {'id': 21515, 'synset': 'lignosae.n.01', 'name': 'lignosae'}, {'id': 21516, 'synset': 'arborescent_plant.n.01', 'name': 'arborescent_plant'}, {'id': 21517, 'synset': 'snag.n.02', 'name': 'snag'}, {'id': 21518, 'synset': 'tree.n.01', 'name': 'tree'}, {'id': 21519, 'synset': 'timber_tree.n.01', 'name': 'timber_tree'}, {'id': 21520, 'synset': 'treelet.n.01', 'name': 'treelet'}, {'id': 21521, 'synset': 'arbor.n.01', 'name': 'arbor'}, {'id': 21522, 'synset': 'bean_tree.n.01', 'name': 'bean_tree'}, {'id': 21523, 'synset': 'pollard.n.01', 'name': 'pollard'}, {'id': 21524, 'synset': 'sapling.n.01', 'name': 'sapling'}, {'id': 21525, 'synset': 'shade_tree.n.01', 'name': 'shade_tree'}, {'id': 21526, 'synset': 'gymnospermous_tree.n.01', 'name': 'gymnospermous_tree'}, {'id': 21527, 'synset': 'conifer.n.01', 'name': 'conifer'}, {'id': 21528, 'synset': 'angiospermous_tree.n.01', 'name': 'angiospermous_tree'}, {'id': 21529, 'synset': 'nut_tree.n.01', 'name': 'nut_tree'}, {'id': 21530, 'synset': 'spice_tree.n.01', 'name': 'spice_tree'}, {'id': 21531, 'synset': 'fever_tree.n.01', 'name': 'fever_tree'}, {'id': 21532, 'synset': 'stump.n.01', 'name': 'stump'}, {'id': 21533, 
'synset': 'bonsai.n.01', 'name': 'bonsai'}, {'id': 21534, 'synset': 'ming_tree.n.02', 'name': 'ming_tree'}, {'id': 21535, 'synset': 'ming_tree.n.01', 'name': 'ming_tree'}, {'id': 21536, 'synset': 'undershrub.n.01', 'name': 'undershrub'}, {'id': 21537, 'synset': 'subshrub.n.01', 'name': 'subshrub'}, {'id': 21538, 'synset': 'bramble.n.01', 'name': 'bramble'}, {'id': 21539, 'synset': 'liana.n.01', 'name': 'liana'}, {'id': 21540, 'synset': 'geophyte.n.01', 'name': 'geophyte'}, {'id': 21541, 'synset': 'desert_plant.n.01', 'name': 'desert_plant'}, {'id': 21542, 'synset': 'mesophyte.n.01', 'name': 'mesophyte'}, {'id': 21543, 'synset': 'marsh_plant.n.01', 'name': 'marsh_plant'}, {'id': 21544, 'synset': 'hemiepiphyte.n.01', 'name': 'hemiepiphyte'}, {'id': 21545, 'synset': 'strangler.n.01', 'name': 'strangler'}, {'id': 21546, 'synset': 'lithophyte.n.01', 'name': 'lithophyte'}, {'id': 21547, 'synset': 'saprobe.n.01', 'name': 'saprobe'}, {'id': 21548, 'synset': 'autophyte.n.01', 'name': 'autophyte'}, {'id': 21549, 'synset': 'root.n.01', 'name': 'root'}, {'id': 21550, 'synset': 'taproot.n.01', 'name': 'taproot'}, {'id': 21551, 'synset': 'prop_root.n.01', 'name': 'prop_root'}, {'id': 21552, 'synset': 'prophyll.n.01', 'name': 'prophyll'}, {'id': 21553, 'synset': 'rootstock.n.02', 'name': 'rootstock'}, {'id': 21554, 'synset': 'quickset.n.01', 'name': 'quickset'}, {'id': 21555, 'synset': 'stolon.n.01', 'name': 'stolon'}, {'id': 21556, 'synset': 'tuberous_plant.n.01', 'name': 'tuberous_plant'}, {'id': 21557, 'synset': 'rhizome.n.01', 'name': 'rhizome'}, {'id': 21558, 'synset': 'rachis.n.01', 'name': 'rachis'}, {'id': 21559, 'synset': 'caudex.n.02', 'name': 'caudex'}, {'id': 21560, 'synset': 'cladode.n.01', 'name': 'cladode'}, {'id': 21561, 'synset': 'receptacle.n.02', 'name': 'receptacle'}, {'id': 21562, 'synset': 'scape.n.01', 'name': 'scape'}, {'id': 21563, 'synset': 'umbel.n.01', 'name': 'umbel'}, {'id': 21564, 'synset': 'petiole.n.01', 'name': 'petiole'}, {'id': 21565, 'synset': 'peduncle.n.02', 'name': 'peduncle'}, {'id': 21566, 'synset': 'pedicel.n.01', 'name': 'pedicel'}, {'id': 21567, 'synset': 'flower_cluster.n.01', 'name': 'flower_cluster'}, {'id': 21568, 'synset': 'raceme.n.01', 'name': 'raceme'}, {'id': 21569, 'synset': 'panicle.n.01', 'name': 'panicle'}, {'id': 21570, 'synset': 'thyrse.n.01', 'name': 'thyrse'}, {'id': 21571, 'synset': 'cyme.n.01', 'name': 'cyme'}, {'id': 21572, 'synset': 'cymule.n.01', 'name': 'cymule'}, {'id': 21573, 'synset': 'glomerule.n.01', 'name': 'glomerule'}, {'id': 21574, 'synset': 'scorpioid_cyme.n.01', 'name': 'scorpioid_cyme'}, {'id': 21575, 'synset': 'ear.n.05', 'name': 'ear'}, {'id': 21576, 'synset': 'spadix.n.01', 'name': 'spadix'}, {'id': 21577, 'synset': 'bulbous_plant.n.01', 'name': 'bulbous_plant'}, {'id': 21578, 'synset': 'bulbil.n.01', 'name': 'bulbil'}, {'id': 21579, 'synset': 'cormous_plant.n.01', 'name': 'cormous_plant'}, {'id': 21580, 'synset': 'fruit.n.01', 'name': 'fruit'}, {'id': 21581, 'synset': 'fruitlet.n.01', 'name': 'fruitlet'}, {'id': 21582, 'synset': 'seed.n.01', 'name': 'seed'}, {'id': 21583, 'synset': 'bean.n.02', 'name': 'bean'}, {'id': 21584, 'synset': 'nut.n.01', 'name': 'nut'}, {'id': 21585, 'synset': 'nutlet.n.01', 'name': 'nutlet'}, {'id': 21586, 'synset': 'kernel.n.01', 'name': 'kernel'}, {'id': 21587, 'synset': 'syconium.n.01', 'name': 'syconium'}, {'id': 21588, 'synset': 'berry.n.02', 'name': 'berry'}, {'id': 21589, 'synset': 'aggregate_fruit.n.01', 'name': 'aggregate_fruit'}, {'id': 21590, 'synset': 'simple_fruit.n.01', 'name': 
'simple_fruit'}, {'id': 21591, 'synset': 'acinus.n.01', 'name': 'acinus'}, {'id': 21592, 'synset': 'drupe.n.01', 'name': 'drupe'}, {'id': 21593, 'synset': 'drupelet.n.01', 'name': 'drupelet'}, {'id': 21594, 'synset': 'pome.n.01', 'name': 'pome'}, {'id': 21595, 'synset': 'pod.n.02', 'name': 'pod'}, {'id': 21596, 'synset': 'loment.n.01', 'name': 'loment'}, {'id': 21597, 'synset': 'pyxidium.n.01', 'name': 'pyxidium'}, {'id': 21598, 'synset': 'husk.n.02', 'name': 'husk'}, {'id': 21599, 'synset': 'cornhusk.n.01', 'name': 'cornhusk'}, {'id': 21600, 'synset': 'pod.n.01', 'name': 'pod'}, {'id': 21601, 'synset': 'accessory_fruit.n.01', 'name': 'accessory_fruit'}, {'id': 21602, 'synset': 'buckthorn.n.01', 'name': 'buckthorn'}, {'id': 21603, 'synset': 'buckthorn_berry.n.01', 'name': 'buckthorn_berry'}, {'id': 21604, 'synset': 'cascara_buckthorn.n.01', 'name': 'cascara_buckthorn'}, {'id': 21605, 'synset': 'cascara.n.01', 'name': 'cascara'}, {'id': 21606, 'synset': 'carolina_buckthorn.n.01', 'name': 'Carolina_buckthorn'}, {'id': 21607, 'synset': 'coffeeberry.n.01', 'name': 'coffeeberry'}, {'id': 21608, 'synset': 'redberry.n.01', 'name': 'redberry'}, {'id': 21609, 'synset': 'nakedwood.n.01', 'name': 'nakedwood'}, {'id': 21610, 'synset': 'jujube.n.01', 'name': 'jujube'}, {'id': 21611, 'synset': "christ's-thorn.n.01", 'name': "Christ's-thorn"}, {'id': 21612, 'synset': 'hazel.n.01', 'name': 'hazel'}, {'id': 21613, 'synset': 'fox_grape.n.01', 'name': 'fox_grape'}, {'id': 21614, 'synset': 'muscadine.n.01', 'name': 'muscadine'}, {'id': 21615, 'synset': 'vinifera.n.01', 'name': 'vinifera'}, {'id': 21616, 'synset': 'pinot_blanc.n.01', 'name': 'Pinot_blanc'}, {'id': 21617, 'synset': 'sauvignon_grape.n.01', 'name': 'Sauvignon_grape'}, {'id': 21618, 'synset': 'sauvignon_blanc.n.01', 'name': 'Sauvignon_blanc'}, {'id': 21619, 'synset': 'muscadet.n.01', 'name': 'Muscadet'}, {'id': 21620, 'synset': 'riesling.n.01', 'name': 'Riesling'}, {'id': 21621, 'synset': 'zinfandel.n.01', 'name': 'Zinfandel'}, {'id': 21622, 'synset': 'chenin_blanc.n.01', 'name': 'Chenin_blanc'}, {'id': 21623, 'synset': 'malvasia.n.01', 'name': 'malvasia'}, {'id': 21624, 'synset': 'verdicchio.n.01', 'name': 'Verdicchio'}, {'id': 21625, 'synset': 'boston_ivy.n.01', 'name': 'Boston_ivy'}, {'id': 21626, 'synset': 'virginia_creeper.n.01', 'name': 'Virginia_creeper'}, {'id': 21627, 'synset': 'true_pepper.n.01', 'name': 'true_pepper'}, {'id': 21628, 'synset': 'betel.n.01', 'name': 'betel'}, {'id': 21629, 'synset': 'cubeb.n.01', 'name': 'cubeb'}, {'id': 21630, 'synset': 'schizocarp.n.01', 'name': 'schizocarp'}, {'id': 21631, 'synset': 'peperomia.n.01', 'name': 'peperomia'}, {'id': 21632, 'synset': 'watermelon_begonia.n.01', 'name': 'watermelon_begonia'}, {'id': 21633, 'synset': 'yerba_mansa.n.01', 'name': 'yerba_mansa'}, {'id': 21634, 'synset': 'pinna.n.01', 'name': 'pinna'}, {'id': 21635, 'synset': 'frond.n.01', 'name': 'frond'}, {'id': 21636, 'synset': 'bract.n.01', 'name': 'bract'}, {'id': 21637, 'synset': 'bracteole.n.01', 'name': 'bracteole'}, {'id': 21638, 'synset': 'involucre.n.01', 'name': 'involucre'}, {'id': 21639, 'synset': 'glume.n.01', 'name': 'glume'}, {'id': 21640, 'synset': 'palmate_leaf.n.01', 'name': 'palmate_leaf'}, {'id': 21641, 'synset': 'pinnate_leaf.n.01', 'name': 'pinnate_leaf'}, {'id': 21642, 'synset': 'bijugate_leaf.n.01', 'name': 'bijugate_leaf'}, {'id': 21643, 'synset': 'decompound_leaf.n.01', 'name': 'decompound_leaf'}, {'id': 21644, 'synset': 'acuminate_leaf.n.01', 'name': 'acuminate_leaf'}, {'id': 21645, 'synset': 
'deltoid_leaf.n.01', 'name': 'deltoid_leaf'}, {'id': 21646, 'synset': 'ensiform_leaf.n.01', 'name': 'ensiform_leaf'}, {'id': 21647, 'synset': 'linear_leaf.n.01', 'name': 'linear_leaf'}, {'id': 21648, 'synset': 'lyrate_leaf.n.01', 'name': 'lyrate_leaf'}, {'id': 21649, 'synset': 'obtuse_leaf.n.01', 'name': 'obtuse_leaf'}, {'id': 21650, 'synset': 'oblanceolate_leaf.n.01', 'name': 'oblanceolate_leaf'}, {'id': 21651, 'synset': 'pandurate_leaf.n.01', 'name': 'pandurate_leaf'}, {'id': 21652, 'synset': 'reniform_leaf.n.01', 'name': 'reniform_leaf'}, {'id': 21653, 'synset': 'spatulate_leaf.n.01', 'name': 'spatulate_leaf'}, {'id': 21654, 'synset': 'even-pinnate_leaf.n.01', 'name': 'even-pinnate_leaf'}, {'id': 21655, 'synset': 'odd-pinnate_leaf.n.01', 'name': 'odd-pinnate_leaf'}, {'id': 21656, 'synset': 'pedate_leaf.n.01', 'name': 'pedate_leaf'}, {'id': 21657, 'synset': 'crenate_leaf.n.01', 'name': 'crenate_leaf'}, {'id': 21658, 'synset': 'dentate_leaf.n.01', 'name': 'dentate_leaf'}, {'id': 21659, 'synset': 'denticulate_leaf.n.01', 'name': 'denticulate_leaf'}, {'id': 21660, 'synset': 'erose_leaf.n.01', 'name': 'erose_leaf'}, {'id': 21661, 'synset': 'runcinate_leaf.n.01', 'name': 'runcinate_leaf'}, {'id': 21662, 'synset': 'prickly-edged_leaf.n.01', 'name': 'prickly-edged_leaf'}, {'id': 21663, 'synset': 'deadwood.n.01', 'name': 'deadwood'}, {'id': 21664, 'synset': 'haulm.n.01', 'name': 'haulm'}, {'id': 21665, 'synset': 'branchlet.n.01', 'name': 'branchlet'}, {'id': 21666, 'synset': 'osier.n.01', 'name': 'osier'}, {'id': 21667, 'synset': 'giant_scrambling_fern.n.01', 'name': 'giant_scrambling_fern'}, {'id': 21668, 'synset': 'umbrella_fern.n.01', 'name': 'umbrella_fern'}, {'id': 21669, 'synset': 'floating_fern.n.02', 'name': 'floating_fern'}, {'id': 21670, 'synset': 'polypody.n.01', 'name': 'polypody'}, {'id': 21671, 'synset': 'licorice_fern.n.01', 'name': 'licorice_fern'}, {'id': 21672, 'synset': 'grey_polypody.n.01', 'name': 'grey_polypody'}, {'id': 21673, 'synset': 'leatherleaf.n.01', 'name': 'leatherleaf'}, {'id': 21674, 'synset': 'rock_polypody.n.01', 'name': 'rock_polypody'}, {'id': 21675, 'synset': 'common_polypody.n.01', 'name': 'common_polypody'}, {'id': 21676, 'synset': "bear's-paw_fern.n.01", 'name': "bear's-paw_fern"}, {'id': 21677, 'synset': 'strap_fern.n.01', 'name': 'strap_fern'}, {'id': 21678, 'synset': 'florida_strap_fern.n.01', 'name': 'Florida_strap_fern'}, {'id': 21679, 'synset': 'basket_fern.n.02', 'name': 'basket_fern'}, {'id': 21680, 'synset': 'snake_polypody.n.01', 'name': 'snake_polypody'}, {'id': 21681, 'synset': "climbing_bird's_nest_fern.n.01", 'name': "climbing_bird's_nest_fern"}, {'id': 21682, 'synset': 'golden_polypody.n.01', 'name': 'golden_polypody'}, {'id': 21683, 'synset': 'staghorn_fern.n.01', 'name': 'staghorn_fern'}, {'id': 21684, 'synset': 'south_american_staghorn.n.01', 'name': 'South_American_staghorn'}, {'id': 21685, 'synset': 'common_staghorn_fern.n.01', 'name': 'common_staghorn_fern'}, {'id': 21686, 'synset': 'felt_fern.n.01', 'name': 'felt_fern'}, {'id': 21687, 'synset': 'potato_fern.n.02', 'name': 'potato_fern'}, {'id': 21688, 'synset': 'myrmecophyte.n.01', 'name': 'myrmecophyte'}, {'id': 21689, 'synset': 'grass_fern.n.01', 'name': 'grass_fern'}, {'id': 21690, 'synset': 'spleenwort.n.01', 'name': 'spleenwort'}, {'id': 21691, 'synset': 'black_spleenwort.n.01', 'name': 'black_spleenwort'}, {'id': 21692, 'synset': "bird's_nest_fern.n.01", 'name': "bird's_nest_fern"}, {'id': 21693, 'synset': 'ebony_spleenwort.n.01', 'name': 'ebony_spleenwort'}, {'id': 21694, 
'synset': 'black-stem_spleenwort.n.01', 'name': 'black-stem_spleenwort'}, {'id': 21695, 'synset': 'walking_fern.n.01', 'name': 'walking_fern'}, {'id': 21696, 'synset': 'green_spleenwort.n.01', 'name': 'green_spleenwort'}, {'id': 21697, 'synset': 'mountain_spleenwort.n.01', 'name': 'mountain_spleenwort'}, {'id': 21698, 'synset': 'lobed_spleenwort.n.01', 'name': 'lobed_spleenwort'}, {'id': 21699, 'synset': 'lanceolate_spleenwort.n.01', 'name': 'lanceolate_spleenwort'}, {'id': 21700, 'synset': "hart's-tongue.n.02", 'name': "hart's-tongue"}, {'id': 21701, 'synset': 'scale_fern.n.01', 'name': 'scale_fern'}, {'id': 21702, 'synset': 'scolopendrium.n.01', 'name': 'scolopendrium'}, {'id': 21703, 'synset': 'deer_fern.n.01', 'name': 'deer_fern'}, {'id': 21704, 'synset': 'doodia.n.01', 'name': 'doodia'}, {'id': 21705, 'synset': 'chain_fern.n.01', 'name': 'chain_fern'}, {'id': 21706, 'synset': 'virginia_chain_fern.n.01', 'name': 'Virginia_chain_fern'}, {'id': 21707, 'synset': 'silver_tree_fern.n.01', 'name': 'silver_tree_fern'}, {'id': 21708, 'synset': 'davallia.n.01', 'name': 'davallia'}, {'id': 21709, 'synset': "hare's-foot_fern.n.01", 'name': "hare's-foot_fern"}, {'id': 21710, 'synset': "canary_island_hare's_foot_fern.n.01", 'name': "Canary_Island_hare's_foot_fern"}, {'id': 21711, 'synset': "squirrel's-foot_fern.n.01", 'name': "squirrel's-foot_fern"}, {'id': 21712, 'synset': 'bracken.n.01', 'name': 'bracken'}, {'id': 21713, 'synset': 'soft_tree_fern.n.01', 'name': 'soft_tree_fern'}, {'id': 21714, 'synset': 'scythian_lamb.n.01', 'name': 'Scythian_lamb'}, {'id': 21715, 'synset': 'false_bracken.n.01', 'name': 'false_bracken'}, {'id': 21716, 'synset': 'thyrsopteris.n.01', 'name': 'thyrsopteris'}, {'id': 21717, 'synset': 'shield_fern.n.01', 'name': 'shield_fern'}, {'id': 21718, 'synset': 'broad_buckler-fern.n.01', 'name': 'broad_buckler-fern'}, {'id': 21719, 'synset': 'fragrant_cliff_fern.n.01', 'name': 'fragrant_cliff_fern'}, {'id': 21720, 'synset': "goldie's_fern.n.01", 'name': "Goldie's_fern"}, {'id': 21721, 'synset': 'wood_fern.n.01', 'name': 'wood_fern'}, {'id': 21722, 'synset': 'male_fern.n.01', 'name': 'male_fern'}, {'id': 21723, 'synset': 'marginal_wood_fern.n.01', 'name': 'marginal_wood_fern'}, {'id': 21724, 'synset': 'mountain_male_fern.n.01', 'name': 'mountain_male_fern'}, {'id': 21725, 'synset': 'lady_fern.n.01', 'name': 'lady_fern'}, {'id': 21726, 'synset': 'alpine_lady_fern.n.01', 'name': 'Alpine_lady_fern'}, {'id': 21727, 'synset': 'silvery_spleenwort.n.02', 'name': 'silvery_spleenwort'}, {'id': 21728, 'synset': 'holly_fern.n.02', 'name': 'holly_fern'}, {'id': 21729, 'synset': 'bladder_fern.n.01', 'name': 'bladder_fern'}, {'id': 21730, 'synset': 'brittle_bladder_fern.n.01', 'name': 'brittle_bladder_fern'}, {'id': 21731, 'synset': 'mountain_bladder_fern.n.01', 'name': 'mountain_bladder_fern'}, {'id': 21732, 'synset': 'bulblet_fern.n.01', 'name': 'bulblet_fern'}, {'id': 21733, 'synset': 'silvery_spleenwort.n.01', 'name': 'silvery_spleenwort'}, {'id': 21734, 'synset': 'oak_fern.n.01', 'name': 'oak_fern'}, {'id': 21735, 'synset': 'limestone_fern.n.01', 'name': 'limestone_fern'}, {'id': 21736, 'synset': 'ostrich_fern.n.01', 'name': 'ostrich_fern'}, {'id': 21737, 'synset': "hart's-tongue.n.01", 'name': "hart's-tongue"}, {'id': 21738, 'synset': 'sensitive_fern.n.01', 'name': 'sensitive_fern'}, {'id': 21739, 'synset': 'christmas_fern.n.01', 'name': 'Christmas_fern'}, {'id': 21740, 'synset': 'holly_fern.n.01', 'name': 'holly_fern'}, {'id': 21741, 'synset': "braun's_holly_fern.n.01", 'name': 
"Braun's_holly_fern"}, {'id': 21742, 'synset': 'western_holly_fern.n.01', 'name': 'western_holly_fern'}, {'id': 21743, 'synset': 'soft_shield_fern.n.01', 'name': 'soft_shield_fern'}, {'id': 21744, 'synset': 'leather_fern.n.02', 'name': 'leather_fern'}, {'id': 21745, 'synset': 'button_fern.n.02', 'name': 'button_fern'}, {'id': 21746, 'synset': 'indian_button_fern.n.01', 'name': 'Indian_button_fern'}, {'id': 21747, 'synset': 'woodsia.n.01', 'name': 'woodsia'}, {'id': 21748, 'synset': 'rusty_woodsia.n.01', 'name': 'rusty_woodsia'}, {'id': 21749, 'synset': 'alpine_woodsia.n.01', 'name': 'Alpine_woodsia'}, {'id': 21750, 'synset': 'smooth_woodsia.n.01', 'name': 'smooth_woodsia'}, {'id': 21751, 'synset': 'boston_fern.n.01', 'name': 'Boston_fern'}, {'id': 21752, 'synset': 'basket_fern.n.01', 'name': 'basket_fern'}, {'id': 21753, 'synset': 'golden_fern.n.02', 'name': 'golden_fern'}, {'id': 21754, 'synset': 'maidenhair.n.01', 'name': 'maidenhair'}, {'id': 21755, 'synset': 'common_maidenhair.n.01', 'name': 'common_maidenhair'}, {'id': 21756, 'synset': 'american_maidenhair_fern.n.01', 'name': 'American_maidenhair_fern'}, {'id': 21757, 'synset': 'bermuda_maidenhair.n.01', 'name': 'Bermuda_maidenhair'}, {'id': 21758, 'synset': 'brittle_maidenhair.n.01', 'name': 'brittle_maidenhair'}, {'id': 21759, 'synset': 'farley_maidenhair.n.01', 'name': 'Farley_maidenhair'}, {'id': 21760, 'synset': 'annual_fern.n.01', 'name': 'annual_fern'}, {'id': 21761, 'synset': 'lip_fern.n.01', 'name': 'lip_fern'}, {'id': 21762, 'synset': 'smooth_lip_fern.n.01', 'name': 'smooth_lip_fern'}, {'id': 21763, 'synset': 'lace_fern.n.01', 'name': 'lace_fern'}, {'id': 21764, 'synset': 'wooly_lip_fern.n.01', 'name': 'wooly_lip_fern'}, {'id': 21765, 'synset': 'southwestern_lip_fern.n.01', 'name': 'southwestern_lip_fern'}, {'id': 21766, 'synset': 'bamboo_fern.n.01', 'name': 'bamboo_fern'}, {'id': 21767, 'synset': 'american_rock_brake.n.01', 'name': 'American_rock_brake'}, {'id': 21768, 'synset': 'european_parsley_fern.n.01', 'name': 'European_parsley_fern'}, {'id': 21769, 'synset': 'hand_fern.n.01', 'name': 'hand_fern'}, {'id': 21770, 'synset': 'cliff_brake.n.01', 'name': 'cliff_brake'}, {'id': 21771, 'synset': 'coffee_fern.n.01', 'name': 'coffee_fern'}, {'id': 21772, 'synset': 'purple_rock_brake.n.01', 'name': 'purple_rock_brake'}, {'id': 21773, 'synset': "bird's-foot_fern.n.01", 'name': "bird's-foot_fern"}, {'id': 21774, 'synset': 'button_fern.n.01', 'name': 'button_fern'}, {'id': 21775, 'synset': 'silver_fern.n.02', 'name': 'silver_fern'}, {'id': 21776, 'synset': 'golden_fern.n.01', 'name': 'golden_fern'}, {'id': 21777, 'synset': 'gold_fern.n.01', 'name': 'gold_fern'}, {'id': 21778, 'synset': 'pteris_cretica.n.01', 'name': 'Pteris_cretica'}, {'id': 21779, 'synset': 'spider_brake.n.01', 'name': 'spider_brake'}, {'id': 21780, 'synset': 'ribbon_fern.n.01', 'name': 'ribbon_fern'}, {'id': 21781, 'synset': 'potato_fern.n.01', 'name': 'potato_fern'}, {'id': 21782, 'synset': 'angiopteris.n.01', 'name': 'angiopteris'}, {'id': 21783, 'synset': 'skeleton_fork_fern.n.01', 'name': 'skeleton_fork_fern'}, {'id': 21784, 'synset': 'horsetail.n.01', 'name': 'horsetail'}, {'id': 21785, 'synset': 'common_horsetail.n.01', 'name': 'common_horsetail'}, {'id': 21786, 'synset': 'swamp_horsetail.n.01', 'name': 'swamp_horsetail'}, {'id': 21787, 'synset': 'scouring_rush.n.01', 'name': 'scouring_rush'}, {'id': 21788, 'synset': 'marsh_horsetail.n.01', 'name': 'marsh_horsetail'}, {'id': 21789, 'synset': 'wood_horsetail.n.01', 'name': 'wood_horsetail'}, {'id': 21790, 
'synset': 'variegated_horsetail.n.01', 'name': 'variegated_horsetail'}, {'id': 21791, 'synset': 'club_moss.n.01', 'name': 'club_moss'}, {'id': 21792, 'synset': 'shining_clubmoss.n.01', 'name': 'shining_clubmoss'}, {'id': 21793, 'synset': 'alpine_clubmoss.n.01', 'name': 'alpine_clubmoss'}, {'id': 21794, 'synset': 'fir_clubmoss.n.01', 'name': 'fir_clubmoss'}, {'id': 21795, 'synset': 'ground_cedar.n.01', 'name': 'ground_cedar'}, {'id': 21796, 'synset': 'ground_fir.n.01', 'name': 'ground_fir'}, {'id': 21797, 'synset': 'foxtail_grass.n.01', 'name': 'foxtail_grass'}, {'id': 21798, 'synset': 'spikemoss.n.01', 'name': 'spikemoss'}, {'id': 21799, 'synset': 'meadow_spikemoss.n.01', 'name': 'meadow_spikemoss'}, {'id': 21800, 'synset': 'desert_selaginella.n.01', 'name': 'desert_selaginella'}, {'id': 21801, 'synset': 'resurrection_plant.n.01', 'name': 'resurrection_plant'}, {'id': 21802, 'synset': 'florida_selaginella.n.01', 'name': 'florida_selaginella'}, {'id': 21803, 'synset': 'quillwort.n.01', 'name': 'quillwort'}, {'id': 21804, 'synset': 'earthtongue.n.01', 'name': 'earthtongue'}, {'id': 21805, 'synset': 'snuffbox_fern.n.01', 'name': 'snuffbox_fern'}, {'id': 21806, 'synset': 'christella.n.01', 'name': 'christella'}, {'id': 21807, 'synset': 'mountain_fern.n.01', 'name': 'mountain_fern'}, {'id': 21808, 'synset': 'new_york_fern.n.01', 'name': 'New_York_fern'}, {'id': 21809, 'synset': 'massachusetts_fern.n.01', 'name': 'Massachusetts_fern'}, {'id': 21810, 'synset': 'beech_fern.n.01', 'name': 'beech_fern'}, {'id': 21811, 'synset': 'broad_beech_fern.n.01', 'name': 'broad_beech_fern'}, {'id': 21812, 'synset': 'long_beech_fern.n.01', 'name': 'long_beech_fern'}, {'id': 21813, 'synset': 'shoestring_fungus.n.01', 'name': 'shoestring_fungus'}, {'id': 21814, 'synset': 'armillaria_caligata.n.01', 'name': 'Armillaria_caligata'}, {'id': 21815, 'synset': 'armillaria_ponderosa.n.01', 'name': 'Armillaria_ponderosa'}, {'id': 21816, 'synset': 'armillaria_zelleri.n.01', 'name': 'Armillaria_zelleri'}, {'id': 21817, 'synset': 'honey_mushroom.n.01', 'name': 'honey_mushroom'}, {'id': 21818, 'synset': 'milkweed.n.01', 'name': 'milkweed'}, {'id': 21819, 'synset': 'white_milkweed.n.01', 'name': 'white_milkweed'}, {'id': 21820, 'synset': 'poke_milkweed.n.01', 'name': 'poke_milkweed'}, {'id': 21821, 'synset': 'swamp_milkweed.n.01', 'name': 'swamp_milkweed'}, {'id': 21822, 'synset': "mead's_milkweed.n.01", 'name': "Mead's_milkweed"}, {'id': 21823, 'synset': 'purple_silkweed.n.01', 'name': 'purple_silkweed'}, {'id': 21824, 'synset': 'showy_milkweed.n.01', 'name': 'showy_milkweed'}, {'id': 21825, 'synset': 'poison_milkweed.n.01', 'name': 'poison_milkweed'}, {'id': 21826, 'synset': 'butterfly_weed.n.01', 'name': 'butterfly_weed'}, {'id': 21827, 'synset': 'whorled_milkweed.n.01', 'name': 'whorled_milkweed'}, {'id': 21828, 'synset': 'cruel_plant.n.01', 'name': 'cruel_plant'}, {'id': 21829, 'synset': 'wax_plant.n.01', 'name': 'wax_plant'}, {'id': 21830, 'synset': 'silk_vine.n.01', 'name': 'silk_vine'}, {'id': 21831, 'synset': 'stapelia.n.01', 'name': 'stapelia'}, {'id': 21832, 'synset': 'stapelias_asterias.n.01', 'name': 'Stapelias_asterias'}, {'id': 21833, 'synset': 'stephanotis.n.01', 'name': 'stephanotis'}, {'id': 21834, 'synset': 'madagascar_jasmine.n.01', 'name': 'Madagascar_jasmine'}, {'id': 21835, 'synset': 'negro_vine.n.01', 'name': 'negro_vine'}, {'id': 21836, 'synset': 'zygospore.n.01', 'name': 'zygospore'}, {'id': 21837, 'synset': 'tree_of_knowledge.n.01', 'name': 'tree_of_knowledge'}, {'id': 21838, 'synset': 
'orangery.n.01', 'name': 'orangery'}, {'id': 21839, 'synset': 'pocketbook.n.01', 'name': 'pocketbook'}, {'id': 21840, 'synset': 'shit.n.04', 'name': 'shit'}, {'id': 21841, 'synset': 'cordage.n.01', 'name': 'cordage'}, {'id': 21842, 'synset': 'yard.n.01', 'name': 'yard'}, {'id': 21843, 'synset': 'extremum.n.02', 'name': 'extremum'}, {'id': 21844, 'synset': 'leaf_shape.n.01', 'name': 'leaf_shape'}, {'id': 21845, 'synset': 'equilateral.n.01', 'name': 'equilateral'}, {'id': 21846, 'synset': 'figure.n.06', 'name': 'figure'}, {'id': 21847, 'synset': 'pencil.n.03', 'name': 'pencil'}, {'id': 21848, 'synset': 'plane_figure.n.01', 'name': 'plane_figure'}, {'id': 21849, 'synset': 'solid_figure.n.01', 'name': 'solid_figure'}, {'id': 21850, 'synset': 'line.n.04', 'name': 'line'}, {'id': 21851, 'synset': 'bulb.n.04', 'name': 'bulb'}, {'id': 21852, 'synset': 'convex_shape.n.01', 'name': 'convex_shape'}, {'id': 21853, 'synset': 'concave_shape.n.01', 'name': 'concave_shape'}, {'id': 21854, 'synset': 'cylinder.n.01', 'name': 'cylinder'}, {'id': 21855, 'synset': 'round_shape.n.01', 'name': 'round_shape'}, {'id': 21856, 'synset': 'heart.n.07', 'name': 'heart'}, {'id': 21857, 'synset': 'polygon.n.01', 'name': 'polygon'}, {'id': 21858, 'synset': 'convex_polygon.n.01', 'name': 'convex_polygon'}, {'id': 21859, 'synset': 'concave_polygon.n.01', 'name': 'concave_polygon'}, {'id': 21860, 'synset': 'reentrant_polygon.n.01', 'name': 'reentrant_polygon'}, {'id': 21861, 'synset': 'amorphous_shape.n.01', 'name': 'amorphous_shape'}, {'id': 21862, 'synset': 'closed_curve.n.01', 'name': 'closed_curve'}, {'id': 21863, 'synset': 'simple_closed_curve.n.01', 'name': 'simple_closed_curve'}, {'id': 21864, 'synset': 's-shape.n.01', 'name': 'S-shape'}, {'id': 21865, 'synset': 'wave.n.07', 'name': 'wave'}, {'id': 21866, 'synset': 'extrados.n.01', 'name': 'extrados'}, {'id': 21867, 'synset': 'hook.n.02', 'name': 'hook'}, {'id': 21868, 'synset': 'envelope.n.03', 'name': 'envelope'}, {'id': 21869, 'synset': 'bight.n.02', 'name': 'bight'}, {'id': 21870, 'synset': 'diameter.n.02', 'name': 'diameter'}, {'id': 21871, 'synset': 'cone.n.02', 'name': 'cone'}, {'id': 21872, 'synset': 'funnel.n.01', 'name': 'funnel'}, {'id': 21873, 'synset': 'oblong.n.01', 'name': 'oblong'}, {'id': 21874, 'synset': 'circle.n.01', 'name': 'circle'}, {'id': 21875, 'synset': 'circle.n.03', 'name': 'circle'}, {'id': 21876, 'synset': 'equator.n.02', 'name': 'equator'}, {'id': 21877, 'synset': 'scallop.n.01', 'name': 'scallop'}, {'id': 21878, 'synset': 'ring.n.02', 'name': 'ring'}, {'id': 21879, 'synset': 'loop.n.02', 'name': 'loop'}, {'id': 21880, 'synset': 'bight.n.01', 'name': 'bight'}, {'id': 21881, 'synset': 'helix.n.01', 'name': 'helix'}, {'id': 21882, 'synset': 'element_of_a_cone.n.01', 'name': 'element_of_a_cone'}, {'id': 21883, 'synset': 'element_of_a_cylinder.n.01', 'name': 'element_of_a_cylinder'}, {'id': 21884, 'synset': 'ellipse.n.01', 'name': 'ellipse'}, {'id': 21885, 'synset': 'quadrate.n.02', 'name': 'quadrate'}, {'id': 21886, 'synset': 'triangle.n.01', 'name': 'triangle'}, {'id': 21887, 'synset': 'acute_triangle.n.01', 'name': 'acute_triangle'}, {'id': 21888, 'synset': 'isosceles_triangle.n.01', 'name': 'isosceles_triangle'}, {'id': 21889, 'synset': 'obtuse_triangle.n.01', 'name': 'obtuse_triangle'}, {'id': 21890, 'synset': 'right_triangle.n.01', 'name': 'right_triangle'}, {'id': 21891, 'synset': 'scalene_triangle.n.01', 'name': 'scalene_triangle'}, {'id': 21892, 'synset': 'parallel.n.03', 'name': 'parallel'}, {'id': 21893, 'synset': 
'trapezoid.n.01', 'name': 'trapezoid'}, {'id': 21894, 'synset': 'star.n.05', 'name': 'star'}, {'id': 21895, 'synset': 'pentagon.n.03', 'name': 'pentagon'}, {'id': 21896, 'synset': 'hexagon.n.01', 'name': 'hexagon'}, {'id': 21897, 'synset': 'heptagon.n.01', 'name': 'heptagon'}, {'id': 21898, 'synset': 'octagon.n.01', 'name': 'octagon'}, {'id': 21899, 'synset': 'nonagon.n.01', 'name': 'nonagon'}, {'id': 21900, 'synset': 'decagon.n.01', 'name': 'decagon'}, {'id': 21901, 'synset': 'rhombus.n.01', 'name': 'rhombus'}, {'id': 21902, 'synset': 'spherical_polygon.n.01', 'name': 'spherical_polygon'}, {'id': 21903, 'synset': 'spherical_triangle.n.01', 'name': 'spherical_triangle'}, {'id': 21904, 'synset': 'convex_polyhedron.n.01', 'name': 'convex_polyhedron'}, {'id': 21905, 'synset': 'concave_polyhedron.n.01', 'name': 'concave_polyhedron'}, {'id': 21906, 'synset': 'cuboid.n.01', 'name': 'cuboid'}, {'id': 21907, 'synset': 'quadrangular_prism.n.01', 'name': 'quadrangular_prism'}, {'id': 21908, 'synset': 'bell.n.05', 'name': 'bell'}, {'id': 21909, 'synset': 'angular_distance.n.01', 'name': 'angular_distance'}, {'id': 21910, 'synset': 'true_anomaly.n.01', 'name': 'true_anomaly'}, {'id': 21911, 'synset': 'spherical_angle.n.01', 'name': 'spherical_angle'}, {'id': 21912, 'synset': 'angle_of_refraction.n.01', 'name': 'angle_of_refraction'}, {'id': 21913, 'synset': 'acute_angle.n.01', 'name': 'acute_angle'}, {'id': 21914, 'synset': 'groove.n.01', 'name': 'groove'}, {'id': 21915, 'synset': 'rut.n.01', 'name': 'rut'}, {'id': 21916, 'synset': 'bulge.n.01', 'name': 'bulge'}, {'id': 21917, 'synset': 'belly.n.03', 'name': 'belly'}, {'id': 21918, 'synset': 'bow.n.05', 'name': 'bow'}, {'id': 21919, 'synset': 'crescent.n.01', 'name': 'crescent'}, {'id': 21920, 'synset': 'ellipsoid.n.01', 'name': 'ellipsoid'}, {'id': 21921, 'synset': 'hypotenuse.n.01', 'name': 'hypotenuse'}, {'id': 21922, 'synset': 'balance.n.04', 'name': 'balance'}, {'id': 21923, 'synset': 'conformation.n.01', 'name': 'conformation'}, {'id': 21924, 'synset': 'symmetry.n.02', 'name': 'symmetry'}, {'id': 21925, 'synset': 'spheroid.n.01', 'name': 'spheroid'}, {'id': 21926, 'synset': 'spherule.n.01', 'name': 'spherule'}, {'id': 21927, 'synset': 'toroid.n.01', 'name': 'toroid'}, {'id': 21928, 'synset': 'column.n.04', 'name': 'column'}, {'id': 21929, 'synset': 'barrel.n.03', 'name': 'barrel'}, {'id': 21930, 'synset': 'pipe.n.03', 'name': 'pipe'}, {'id': 21931, 'synset': 'pellet.n.01', 'name': 'pellet'}, {'id': 21932, 'synset': 'bolus.n.01', 'name': 'bolus'}, {'id': 21933, 'synset': 'dewdrop.n.01', 'name': 'dewdrop'}, {'id': 21934, 'synset': 'ridge.n.02', 'name': 'ridge'}, {'id': 21935, 'synset': 'rim.n.01', 'name': 'rim'}, {'id': 21936, 'synset': 'taper.n.01', 'name': 'taper'}, {'id': 21937, 'synset': 'boundary.n.02', 'name': 'boundary'}, {'id': 21938, 'synset': 'incisure.n.01', 'name': 'incisure'}, {'id': 21939, 'synset': 'notch.n.01', 'name': 'notch'}, {'id': 21940, 'synset': 'wrinkle.n.01', 'name': 'wrinkle'}, {'id': 21941, 'synset': 'dermatoglyphic.n.01', 'name': 'dermatoglyphic'}, {'id': 21942, 'synset': 'frown_line.n.01', 'name': 'frown_line'}, {'id': 21943, 'synset': 'line_of_life.n.01', 'name': 'line_of_life'}, {'id': 21944, 'synset': 'line_of_heart.n.01', 'name': 'line_of_heart'}, {'id': 21945, 'synset': 'crevice.n.01', 'name': 'crevice'}, {'id': 21946, 'synset': 'cleft.n.01', 'name': 'cleft'}, {'id': 21947, 'synset': 'roulette.n.01', 'name': 'roulette'}, {'id': 21948, 'synset': 'node.n.01', 'name': 'node'}, {'id': 21949, 'synset': 'tree.n.02', 
'name': 'tree'}, {'id': 21950, 'synset': 'stemma.n.01', 'name': 'stemma'}, {'id': 21951, 'synset': 'brachium.n.01', 'name': 'brachium'}, {'id': 21952, 'synset': 'fork.n.03', 'name': 'fork'}, {'id': 21953, 'synset': 'block.n.03', 'name': 'block'}, {'id': 21954, 'synset': 'ovoid.n.01', 'name': 'ovoid'}, {'id': 21955, 'synset': 'tetrahedron.n.01', 'name': 'tetrahedron'}, {'id': 21956, 'synset': 'pentahedron.n.01', 'name': 'pentahedron'}, {'id': 21957, 'synset': 'hexahedron.n.01', 'name': 'hexahedron'}, {'id': 21958, 'synset': 'regular_polyhedron.n.01', 'name': 'regular_polyhedron'}, {'id': 21959, 'synset': 'polyhedral_angle.n.01', 'name': 'polyhedral_angle'}, {'id': 21960, 'synset': 'cube.n.01', 'name': 'cube'}, {'id': 21961, 'synset': 'truncated_pyramid.n.01', 'name': 'truncated_pyramid'}, {'id': 21962, 'synset': 'truncated_cone.n.01', 'name': 'truncated_cone'}, {'id': 21963, 'synset': 'tail.n.03', 'name': 'tail'}, {'id': 21964, 'synset': 'tongue.n.03', 'name': 'tongue'}, {'id': 21965, 'synset': 'trapezohedron.n.01', 'name': 'trapezohedron'}, {'id': 21966, 'synset': 'wedge.n.01', 'name': 'wedge'}, {'id': 21967, 'synset': 'keel.n.01', 'name': 'keel'}, {'id': 21968, 'synset': 'place.n.06', 'name': 'place'}, {'id': 21969, 'synset': 'herpes.n.01', 'name': 'herpes'}, {'id': 21970, 'synset': 'chlamydia.n.01', 'name': 'chlamydia'}, {'id': 21971, 'synset': 'wall.n.04', 'name': 'wall'}, {'id': 21972, 'synset': 'micronutrient.n.01', 'name': 'micronutrient'}, {'id': 21973, 'synset': 'chyme.n.01', 'name': 'chyme'}, {'id': 21974, 'synset': 'ragweed_pollen.n.01', 'name': 'ragweed_pollen'}, {'id': 21975, 'synset': 'pina_cloth.n.01', 'name': 'pina_cloth'}, {'id': 21976, 'synset': 'chlorobenzylidenemalononitrile.n.01', 'name': 'chlorobenzylidenemalononitrile'}, {'id': 21977, 'synset': 'carbon.n.01', 'name': 'carbon'}, {'id': 21978, 'synset': 'charcoal.n.01', 'name': 'charcoal'}, {'id': 21979, 'synset': 'rock.n.02', 'name': 'rock'}, {'id': 21980, 'synset': 'gravel.n.01', 'name': 'gravel'}, {'id': 21981, 'synset': 'aflatoxin.n.01', 'name': 'aflatoxin'}, {'id': 21982, 'synset': 'alpha-tocopheral.n.01', 'name': 'alpha-tocopheral'}, {'id': 21983, 'synset': 'leopard.n.01', 'name': 'leopard'}, {'id': 21984, 'synset': 'bricks_and_mortar.n.01', 'name': 'bricks_and_mortar'}, {'id': 21985, 'synset': 'lagging.n.01', 'name': 'lagging'}, {'id': 21986, 'synset': 'hydraulic_cement.n.01', 'name': 'hydraulic_cement'}, {'id': 21987, 'synset': 'choline.n.01', 'name': 'choline'}, {'id': 21988, 'synset': 'concrete.n.01', 'name': 'concrete'}, {'id': 21989, 'synset': 'glass_wool.n.01', 'name': 'glass_wool'}, {'id': 21990, 'synset': 'soil.n.02', 'name': 'soil'}, {'id': 21991, 'synset': 'high_explosive.n.01', 'name': 'high_explosive'}, {'id': 21992, 'synset': 'litter.n.02', 'name': 'litter'}, {'id': 21993, 'synset': 'fish_meal.n.01', 'name': 'fish_meal'}, {'id': 21994, 'synset': 'greek_fire.n.01', 'name': 'Greek_fire'}, {'id': 21995, 'synset': 'culture_medium.n.01', 'name': 'culture_medium'}, {'id': 21996, 'synset': 'agar.n.01', 'name': 'agar'}, {'id': 21997, 'synset': 'blood_agar.n.01', 'name': 'blood_agar'}, {'id': 21998, 'synset': 'hip_tile.n.01', 'name': 'hip_tile'}, {'id': 21999, 'synset': 'hyacinth.n.01', 'name': 'hyacinth'}, {'id': 22000, 'synset': 'hydroxide_ion.n.01', 'name': 'hydroxide_ion'}, {'id': 22001, 'synset': 'ice.n.01', 'name': 'ice'}, {'id': 22002, 'synset': 'inositol.n.01', 'name': 'inositol'}, {'id': 22003, 'synset': 'linoleum.n.01', 'name': 'linoleum'}, {'id': 22004, 'synset': 'lithia_water.n.01', 'name': 
'lithia_water'}, {'id': 22005, 'synset': 'lodestone.n.01', 'name': 'lodestone'}, {'id': 22006, 'synset': 'pantothenic_acid.n.01', 'name': 'pantothenic_acid'}, {'id': 22007, 'synset': 'paper.n.01', 'name': 'paper'}, {'id': 22008, 'synset': 'papyrus.n.01', 'name': 'papyrus'}, {'id': 22009, 'synset': 'pantile.n.01', 'name': 'pantile'}, {'id': 22010, 'synset': 'blacktop.n.01', 'name': 'blacktop'}, {'id': 22011, 'synset': 'tarmacadam.n.01', 'name': 'tarmacadam'}, {'id': 22012, 'synset': 'paving.n.01', 'name': 'paving'}, {'id': 22013, 'synset': 'plaster.n.01', 'name': 'plaster'}, {'id': 22014, 'synset': 'poison_gas.n.01', 'name': 'poison_gas'}, {'id': 22015, 'synset': 'ridge_tile.n.01', 'name': 'ridge_tile'}, {'id': 22016, 'synset': 'roughcast.n.01', 'name': 'roughcast'}, {'id': 22017, 'synset': 'sand.n.01', 'name': 'sand'}, {'id': 22018, 'synset': 'spackle.n.01', 'name': 'spackle'}, {'id': 22019, 'synset': 'render.n.01', 'name': 'render'}, {'id': 22020, 'synset': 'wattle_and_daub.n.01', 'name': 'wattle_and_daub'}, {'id': 22021, 'synset': 'stucco.n.01', 'name': 'stucco'}, {'id': 22022, 'synset': 'tear_gas.n.01', 'name': 'tear_gas'}, {'id': 22023, 'synset': 'linseed.n.01', 'name': 'linseed'}, {'id': 22024, 'synset': 'vitamin.n.01', 'name': 'vitamin'}, {'id': 22025, 'synset': 'fat-soluble_vitamin.n.01', 'name': 'fat-soluble_vitamin'}, {'id': 22026, 'synset': 'water-soluble_vitamin.n.01', 'name': 'water-soluble_vitamin'}, {'id': 22027, 'synset': 'vitamin_a.n.01', 'name': 'vitamin_A'}, {'id': 22028, 'synset': 'vitamin_a1.n.01', 'name': 'vitamin_A1'}, {'id': 22029, 'synset': 'vitamin_a2.n.01', 'name': 'vitamin_A2'}, {'id': 22030, 'synset': 'b-complex_vitamin.n.01', 'name': 'B-complex_vitamin'}, {'id': 22031, 'synset': 'vitamin_b1.n.01', 'name': 'vitamin_B1'}, {'id': 22032, 'synset': 'vitamin_b12.n.01', 'name': 'vitamin_B12'}, {'id': 22033, 'synset': 'vitamin_b2.n.01', 'name': 'vitamin_B2'}, {'id': 22034, 'synset': 'vitamin_b6.n.01', 'name': 'vitamin_B6'}, {'id': 22035, 'synset': 'vitamin_bc.n.01', 'name': 'vitamin_Bc'}, {'id': 22036, 'synset': 'niacin.n.01', 'name': 'niacin'}, {'id': 22037, 'synset': 'vitamin_d.n.01', 'name': 'vitamin_D'}, {'id': 22038, 'synset': 'vitamin_e.n.01', 'name': 'vitamin_E'}, {'id': 22039, 'synset': 'biotin.n.01', 'name': 'biotin'}, {'id': 22040, 'synset': 'vitamin_k.n.01', 'name': 'vitamin_K'}, {'id': 22041, 'synset': 'vitamin_k1.n.01', 'name': 'vitamin_K1'}, {'id': 22042, 'synset': 'vitamin_k3.n.01', 'name': 'vitamin_K3'}, {'id': 22043, 'synset': 'vitamin_p.n.01', 'name': 'vitamin_P'}, {'id': 22044, 'synset': 'vitamin_c.n.01', 'name': 'vitamin_C'}, {'id': 22045, 'synset': 'planking.n.01', 'name': 'planking'}, {'id': 22046, 'synset': 'chipboard.n.01', 'name': 'chipboard'}, {'id': 22047, 'synset': 'knothole.n.01', 'name': 'knothole'}] # noqa \ No newline at end of file diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/reid_export.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/reid_export.py deleted file mode 100644 index 9ef8d13c148963ce2338a17c9e9c6a24a0f6d4fb..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/reid_export.py +++ /dev/null @@ -1,313 +0,0 @@ -import argparse - -import os -# limit the number of cpus used by high performance libraries -os.environ["OMP_NUM_THREADS"] = "1" -os.environ["OPENBLAS_NUM_THREADS"] = "1" -os.environ["MKL_NUM_THREADS"] = "1" -os.environ["VECLIB_MAXIMUM_THREADS"] = "1" -os.environ["NUMEXPR_NUM_THREADS"] = "1" - -import sys -import numpy as np -from pathlib import Path -import torch 
-import time -import platform -import pandas as pd -import subprocess -import torch.backends.cudnn as cudnn -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0].parents[0] # yolov5 strongsort root directory -WEIGHTS = ROOT / 'weights' - - -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if str(ROOT / 'yolov5') not in sys.path: - sys.path.append(str(ROOT / 'yolov5')) # add yolov5 ROOT to PATH - -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import logging -from ultralytics.yolo.utils.torch_utils import select_device -from ultralytics.yolo.utils import LOGGER, colorstr, ops -from ultralytics.yolo.utils.checks import check_requirements, check_version -from trackers.strongsort.deep.models import build_model -from trackers.strongsort.deep.reid_model_factory import get_model_name, load_pretrained_weights - - -def file_size(path): - # Return file/dir size (MB) - path = Path(path) - if path.is_file(): - return path.stat().st_size / 1E6 - elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 - else: - return 0.0 - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - try: - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f)) - else: - ts.save(str(f)) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): - # ONNX export - try: - check_requirements(('onnx',)) - import onnx - - f = file.with_suffix('.onnx') - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - - if dynamic: - dynamic = {'images': {0: 'batch'}} # ReID input, e.g. shape(1,3,256,128) - dynamic['output'] = {0: 'batch'} # ReID embedding, shape(1,feature_dim) - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - do_constant_folding=True, - input_names=['images'], - output_names=['output'], - dynamic_axes=dynamic or None - ) - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'simplified ONNX model failed validation' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'simplifier failure: {e}') - 
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - - -def export_openvino(file, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - try: - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_tflite(file, half, prefix=colorstr('TFLite:')): - # YOLOv5 TFLite export (via openvino2tensorflow) - try: - check_requirements(('openvino2tensorflow', 'tensorflow', 'tensorflow_datasets')) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - output = Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}')) - modelxml = list(Path(file).glob('*.xml'))[0] - cmd = f"openvino2tensorflow \ - --model_path {modelxml} \ - --model_output_path {output} \ - --output_pb \ - --output_saved_model \ - --output_no_quant_float32_tflite \ - --output_dynamic_range_quant_tflite" - subprocess.check_output(cmd.split()) # export - - LOGGER.info(f'{prefix} export success, results saved in {output} ({file_size(output):.1f} MB)') - return output - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - try: - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING: --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="ReID export") - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[256, 128], help='image (h, w)') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--weights', nargs='+', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt', help='model.pt path(s)') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine') - args = parser.parse_args() - - t = time.time() - - include = [x.lower() for x in args.include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, openvino, engine, tflite = flags # export booleans - - args.device = select_device(args.device) - if args.half: - assert args.device.type != 'cpu', '--half only compatible with GPU export, i.e. use --device 0' - assert not args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - - if isinstance(args.weights, list): # nargs='+' yields a list; export the first checkpoint - args.weights = Path(args.weights[0]) - - model = build_model( - get_model_name(args.weights), - num_classes=1, - pretrained=not (args.weights and args.weights.is_file() and args.weights.suffix == '.pt'), - use_gpu=args.device - ).to(args.device) - load_pretrained_weights(model, args.weights) - model.eval() - - if args.optimize: - assert args.device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' - - im = torch.zeros(args.batch_size, 3, args.imgsz[0], args.imgsz[1]).to(args.device) # image size (batch, 3, 256, 128) BCHW - for _ in range(2): - y = model(im) # dry runs - if args.half: - im, model = im.half(), model.half() # to FP16 - shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {args.weights} with output shape {shape} ({file_size(args.weights):.1f} MB)") - - # Exports - f = [''] * len(fmts) # exported filenames - if jit: - f[0] = export_torchscript(model, im, args.weights, args.optimize) - if engine: # TensorRT required before ONNX - f[1] = export_engine(model, im, args.weights, args.half, args.dynamic, args.simplify, args.workspace, args.verbose) - if onnx: # OpenVINO requires ONNX - f[2] = export_onnx(model, im, args.weights, args.opset, args.dynamic, args.simplify) # opset 12 - if openvino: - f[3] = export_openvino(args.weights, args.half) - if tflite: - export_tflite(f[3], False) # convert from the OpenVINO model directory (f[3]); passing the whole list was a bug - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', args.weights.parent.resolve())}" - f"\nVisualize: https://netron.app") - diff --git a/spaces/bigscience/ethical-charter/charter.html b/spaces/bigscience/ethical-charter/charter.html deleted file mode 100644 index a23ed49b7bf753901820461f845772de2ecac439..0000000000000000000000000000000000000000 --- a/spaces/bigscience/ethical-charter/charter.html +++ /dev/null @@ -1 +0,0 @@ -

    BigScience Ethical Charter

     

    Preamble

    Introduction

    The development and applications of research in NLP are advancing rapidly, with direct real-world consequences. This brings possible societal benefits, but it also considerably increases the related risks. Aware of these potential challenges, BigScience drafted an ethical charter formalizing its core values and how they are articulated.

    Scope

    The scope of this ethical charter is threefold:

    1.    To establish the core values of BigScience in order to allow its contributors to commit to them, both individually and collectively.

    2.    To serve as a pivot for drafting BigScience documents intended to frame specific issues ethically and legally.

    3.    To enable BigScience to promote these values within the research community through scientific publication, dissemination, and popularization.

    People concerned

    The members of BigScience hold the values stated in this ethical charter. As ethical guidelines, they apply to any activities and documents governing a specific aspect of the project.

    Limitations of this ethical charter

    Given the breadth of BigScience’s scope and its drive to advance NLP research, we recognize that not all scientific research will have a positive impact on society. It is difficult to predict all the uses the scientific community will make of our artifacts. We therefore defer to our license and model card for further information.

    Relevance over time

    We interpret ethics as an ongoing process, not a time-fixed code with universal validity. For these reasons, when needed, BigScience will review, update and adapt the ethical charter from time to time.

    Legitimacy

    This ethical charter is the product of a bottom-up collaboration that sought to collect the diverse thoughts and opinions of BigScience participants, followed by a final revision by experts in applied ethics and law. We aim for consensus: if any BigScience member individually does not feel aligned with one or more of the values inscribed in this ethical charter, that member has the right to object at appropriate times and places.

    Ethical approach

    We assume value pluralism as the basis of our community, and we cherish it. That is why the ethical notion of harmony (和) in Confucian moral theory seemed the appropriate approach for an international and interdisciplinary scientific community such as BigScience. “Harmony is by its very nature relational. It presupposes the coexistence of multiple parties; […] harmony is always contextual; epistemologically it calls for a holistic approach[1].”

    Ethical compliance

    We distinguish two levels of ethical compliance operating within the charter: individual and collective. We are held accountable for ethical compliance both as individual BigScience contributors and as a collective research entity.

    Other documents articulation

    Given the pivotal function of this ethical charter, we will refer to the other BigScience documents intended to govern specific issues directly where needed in the relevant paragraph.

    BigScience Values

     

    We apply the distinction between intrinsic and extrinsic values in the structure of this ethical charter. The former refers to “what is valuable for its own sake, in itself […], as an end[2]”; the latter is characterized as “what is valuable as a means, or for something else’s sake[3]”. We distinguish between the two because extrinsic values can be varied more readily in the service of the intrinsic ones: they are substitutable. This structure will help the reader understand how the two types of values combine, and it allows the BigScience community to adapt this ethical charter over time.

    Intrinsic Values

     

    • Inclusivity

    We work to ensure a welcoming process and equal access to the BigScience artifacts without any form of discrimination (e.g., religion, ethnicity, sexual orientation, gender, political orientation, age, ability). We believe that “inclusivity” is not just non-discrimination, but also a sense of belonging.

    • Diversity

    The BigScience community has over 900 researchers and communities (see some listed collaborations here) from 50 countries covering over 20 languages. The collaborators bring together their expertise from various sources of knowledge, scientific fields, and institutional contexts (academia, industry, research institutions, etc).

    • Reproducibility 

    The BigScience project was born with the clear intention of being a research initiative devoted to open science. BigScience aims at ensuring the reproduction of the research experiments and scientific conclusions developed under its aegis.

    • Openness

    Openness takes two dimensions, one focused on the process, and the other focused on its result. BigScience aims to be an open science framework whereby NLP, and broadly, AI-related researchers from all over the world can contribute and join the initiative.

    With regards to the results of our research, such as the future Large Language Model, these are created by the research community for the research community, and they will therefore be released on an open basis, taking into account the risks derived from the use of the model.

    • Responsibility

    Each contributor has both an individual and a collective responsibility for their work within the BigScience project. This responsibility is both social and environmental. Regarding the former, BigScience intends to positively impact stakeholders through its artifacts. Concerning the latter, BigScience is committed to developing tools to monitor and lower its artifacts’ carbon footprint and energy consumption.

    Moreover, other tools such as an open legal playbook for NLP researchers guiding them regarding the use and respect of IP and privacy rights also seek to promote responsibility around the scientific community.

    Extrinsic Values

    • Accessibility

    As a means to achieve openness

    BigScience makes its best efforts to ensure that our research and technological outputs are easily interpretable and clearly explained to the wider public beyond the scientific community, especially to the communities that have participated in data sharing.

    Currently instrumentalized in:

    • no-code tools for exploring the catalog, trained models, etc.
    • translating our calls for participation (in the data sourcing group)
    • journalism (articles published on the project)
    • linked to multidisciplinarity - legal hackathon as a step toward “non-technical” presentation
    • Transparency

    As a means to achieve reproducibility

    BigScience’s work is actively promoted through conferences, webinars, academic research, and scientific popularization so that others can see our work.

    We have set up a management framework to oversee the use of BigScience models, datasets, and tools, e.g. through working groups. 

    All BigScience internal meetings and work progress are publicly shared within the Community, e.g. through public episodes.

    We are committed to building tools to interpret, monitor, explain, and make intelligible the artifacts developed by BigScience.

    • Interdisciplinarity

    As a means to achieve inclusivity

    We are constantly building bridges among computer science, linguistics, law, sociology, philosophy, and other relevant disciplines in order to adopt a holistic approach in developing BigScience artifacts.

    • Multilingualism

    As a means to achieve diversity

    By having a system that is multilingual from its conception, with the immediate goal of covering the 20 most spoken languages in the world and a broad reach to include up to hundreds based on collaborations with native speakers, we aim to reduce existing disparities in language and foster a more equitable distribution of the benefits of our artifacts.


    [1] Chenyang Li, “The Confucian Ideal of Harmony”, in Philosophy East and West, vol. 56, no. 4, 2006, p. 589.

    [2] Chris Heathwood, “Monism and pluralism about value”, in The Oxford Handbook of Value Theory, Iwao Hirose and Jonas Olson (eds.), Oxford University Press, Oxford, 2015, p. 29.

    [3] Ibid.

    \ No newline at end of file diff --git a/spaces/bigscience/petals-api/src/bloom/__init__.py b/spaces/bigscience/petals-api/src/bloom/__init__.py deleted file mode 100644 index d42bf511dfde085ee1502891743b740a86a6ed5f..0000000000000000000000000000000000000000 --- a/spaces/bigscience/petals-api/src/bloom/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from src.bloom.block import BloomBlock -from src.bloom.model import BloomConfig, BloomForCausalLM, BloomModel, BloomPreTrainedModel diff --git a/spaces/bioriAsaeru/text-to-voice/Golmaal 2006 Full Movie HD Free 49 Stream the Funniest Bollywood Film of the Year.md b/spaces/bioriAsaeru/text-to-voice/Golmaal 2006 Full Movie HD Free 49 Stream the Funniest Bollywood Film of the Year.md deleted file mode 100644 index 434dbd6bfdef0f9718c2e314fd43e84ac5a9f896..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Golmaal 2006 Full Movie HD Free 49 Stream the Funniest Bollywood Film of the Year.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

    Devgn began his professional career with Phool Aur Kaante in 1991.[3][4] He then rose to prominence as an action hero, starring in successful films such as Jigar (1992), Dilwale (1994), and Diljale (1996). He went on to give critically acclaimed performances in Hum Dil De Chuke Sanam (1999), Company (2002), and Deewangee (2002); for Zakhm (1998) and The Legend of Bhagat Singh (2002), he won the National Film Award for Best Actor. After the success of Golmaal: Fun Unlimited (2006), he collaborated with Rohit Shetty on a number of action comedies, including Golmaal Returns (2008), All the Best: Fun Begins (2009), Golmaal 3 (2010), Singham (2011), Bol Bachchan (2012), Singham Returns (2014), and Golmaal Again (2017). His highest-grossing films include Total Dhamaal (2019), Tanhaji (2020), and Drishyam 2 (2022).[5]

    -

    golmaal 2006 full movie hd free 49


    DOWNLOAD ··· https://urloso.com/2uyR6B



    aaccfb2cb3
    -
    -
\ No newline at end of file diff --git a/spaces/birdortyedi/cifr-pytorch/modeling/base.py b/spaces/birdortyedi/cifr-pytorch/modeling/base.py deleted file mode 100644 index 546427a1e9f91fceecea94913b23e46fc1787289..0000000000000000000000000000000000000000 --- a/spaces/birdortyedi/cifr-pytorch/modeling/base.py +++ /dev/null @@ -1,60 +0,0 @@ -from torch import nn - - -class BaseNetwork(nn.Module): - def __init__(self): - super(BaseNetwork, self).__init__() - - def forward(self, x, y): - pass - - def print_network(self): - if isinstance(self, list): - self = self[0] - num_params = 0 - for param in self.parameters(): - num_params += param.numel() - print('Network [%s] was created. Total number of parameters: %.1f million. ' - 'To see the architecture, do print(network).' - % (type(self).__name__, num_params / 1000000)) - - def set_requires_grad(self, requires_grad=False): - """Set requires_grad=False for all the networks to avoid unnecessary computations - Parameters: - requires_grad (bool) -- whether the networks require gradients or not - """ - for param in self.parameters(): - param.requires_grad = requires_grad - - def init_weights(self, init_type='xavier', gain=0.02): - def init_func(m): - classname = m.__class__.__name__ - if classname.find('BatchNorm2d') != -1: - if hasattr(m, 'weight') and m.weight is not None: - nn.init.normal_(m.weight.data, 1.0, gain) - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias.data, 0.0) - elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - nn.init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - nn.init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'xavier_uniform': - nn.init.xavier_uniform_(m.weight.data, gain=1.0) - elif init_type == 'kaiming': - nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - nn.init.orthogonal_(m.weight.data, gain=gain) - elif init_type == 'none': # uses pytorch's default init method - m.reset_parameters() - else: - raise NotImplementedError('initialization method [%s] is not implemented' % init_type) - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias.data, 0.0) - - self.apply(init_func) - - # propagate to children - for m in self.children(): - if hasattr(m, 'init_weights'): - m.init_weights(init_type, gain) diff --git a/spaces/bkhmsi/AraPoet/README.md b/spaces/bkhmsi/AraPoet/README.md deleted file mode 100644 index 6a8cd79d4065e8bb44ed10769c7e6110b6b0ec2a..0000000000000000000000000000000000000000 --- a/spaces/bkhmsi/AraPoet/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AraPoet -emoji: ✍️ -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bookbot/Wikipedia-Scraper/README.md b/spaces/bookbot/Wikipedia-Scraper/README.md deleted file mode 100644 index d7b047a8b236ccd2629dcf89010ca8ae0dcd643c..0000000000000000000000000000000000000000 --- a/spaces/bookbot/Wikipedia-Scraper/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Wikipedia Scraper -emoji: 🐳 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/botlik100/kaki/README.md b/spaces/botlik100/kaki/README.md deleted file 
mode 100644 index 9cb518590fc64557b6d76c297dedb3bb75e3b3a9..0000000000000000000000000000000000000000 --- a/spaces/botlik100/kaki/README.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: RVC V2 -emoji: 💻 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false -license: lgpl-3.0 ---- - -## 🔧 Pre-requisites - -Before running the project, you must have the following tool installed on your machine: -* [Python v3.8.0](https://www.python.org/downloads/release/python-380/) - -Also, you will need to clone the repository: - -```bash -# Clone the repository -git clone https://huggingface.co/spaces/mateuseap/magic-vocals/ -# Enter the root directory -cd magic-vocals -``` - -## 🚀 How to run - -After you've cloned the repository and entered the root directory, run the following commands: - -```bash -# Create and activate a Virtual Environment (make sure you're using Python v3.8.0 to do it) -python -m venv venv -. venv/bin/activate - -# Change mode and execute a shell script to configure and run the application -chmod +x run.sh -./run.sh -``` - -After the shell script executes everything, the application will be running at http://127.0.0.1:7860! Open up the link in a browser to use the app: - -![Magic Vocals](https://i.imgur.com/V55oKv8.png) - -**You only need to execute `run.sh` once.** After that, just activate the virtual environment and run the command below to start the app again: - -```bash -python app.py -``` - -**THE `run.sh` IS SUPPORTED BY THE FOLLOWING OPERATING SYSTEMS:** - - -| OS | Supported | -|-----------|:---------:| -| `Windows` | ❌ | -| `Ubuntu` | ✅ | \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/platforms/pyglet_platform.py b/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/platforms/pyglet_platform.py deleted file mode 100644 index a70cf7b659bc85a92f6c9c8ebcc360662a068507..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/platforms/pyglet_platform.py +++ /dev/null @@ -1,90 +0,0 @@ -from pyrender.constants import (TARGET_OPEN_GL_MAJOR, TARGET_OPEN_GL_MINOR, - MIN_OPEN_GL_MAJOR, MIN_OPEN_GL_MINOR) -from .base import Platform - -import OpenGL - - -__all__ = ['PygletPlatform'] - -class PygletPlatform(Platform): - """Renders on-screen using a 1x1 hidden Pyglet window for getting - an OpenGL context. 
- """ - - def __init__(self, viewport_width, viewport_height): - super(PygletPlatform, self).__init__(viewport_width, viewport_height) - self._window = None - - def init_context(self): - import pyglet - pyglet.options['shadow_window'] = False - - try: - pyglet.lib.x11.xlib.XInitThreads() - except Exception: - pass - - self._window = None - confs = [pyglet.gl.Config(sample_buffers=1, samples=4, - depth_size=24, - double_buffer=True, - major_version=TARGET_OPEN_GL_MAJOR, - minor_version=TARGET_OPEN_GL_MINOR), - pyglet.gl.Config(depth_size=24, - double_buffer=True, - major_version=TARGET_OPEN_GL_MAJOR, - minor_version=TARGET_OPEN_GL_MINOR), - pyglet.gl.Config(sample_buffers=1, samples=4, - depth_size=24, - double_buffer=True, - major_version=MIN_OPEN_GL_MAJOR, - minor_version=MIN_OPEN_GL_MINOR), - pyglet.gl.Config(depth_size=24, - double_buffer=True, - major_version=MIN_OPEN_GL_MAJOR, - minor_version=MIN_OPEN_GL_MINOR)] - for conf in confs: - try: - self._window = pyglet.window.Window(config=conf, visible=False, - resizable=False, - width=1, height=1) - break - except pyglet.window.NoSuchConfigException as e: - pass - - if not self._window: - raise ValueError( - 'Failed to initialize Pyglet window with an OpenGL >= 3+ ' - 'context. If you\'re logged in via SSH, ensure that you\'re ' - 'running your script with vglrun (i.e. VirtualGL). The ' - 'internal error message was "{}"'.format(e) - ) - - def make_current(self): - if self._window: - self._window.switch_to() - - def make_uncurrent(self): - try: - import pyglet - pyglet.gl.xlib.glx.glXMakeContextCurrent(self._window.context.x_display, 0, 0, None) - except Exception: - pass - - def delete_context(self): - if self._window is not None: - self.make_current() - cid = OpenGL.contextdata.getContext() - try: - self._window.context.destroy() - self._window.close() - except Exception: - pass - self._window = None - OpenGL.contextdata.cleanupContext(cid) - del cid - - def supports_framebuffers(self): - return True diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/README.md b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/README.md deleted file mode 100644 index ab43077b59d073de727d5ae7f9cd5b5eee28592d..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: S1000 Veri Toplama -emoji: 👁 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.1.0 -app_file: app.py -pinned: false -license: osl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py deleted file mode 100644 index 53272c726af810efc248f2428dda7ca7271fcd00..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import copy -import logging -import numpy as np -from typing import Callable, List, Union -import torch -from panopticapi.utils import rgb2id - -from detectron2.config import configurable -from detectron2.data import MetadataCatalog -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T - -from .target_generator import PanopticDeepLabTargetGenerator - -__all__ = ["PanopticDeeplabDatasetMapper"] - - -class PanopticDeeplabDatasetMapper: - """ - The callable currently does the following: - - 1. Read the image from "file_name" and label from "pan_seg_file_name" - 2. Applies random scale, crop and flip transforms to image and label - 3. Prepare data to Tensor and generate training targets from label - """ - - @configurable - def __init__( - self, - *, - augmentations: List[Union[T.Augmentation, T.Transform]], - image_format: str, - panoptic_target_generator: Callable, - ): - """ - NOTE: this interface is experimental. - - Args: - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. - panoptic_target_generator: a callable that takes "panoptic_seg" and - "segments_info" to generate training targets for the model. - """ - # fmt: off - self.augmentations = T.AugmentationList(augmentations) - self.image_format = image_format - # fmt: on - logger = logging.getLogger(__name__) - logger.info("Augmentations used in training: " + str(augmentations)) - - self.panoptic_target_generator = panoptic_target_generator - - @classmethod - def from_config(cls, cfg): - augs = [ - T.ResizeShortestEdge( - cfg.INPUT.MIN_SIZE_TRAIN, - cfg.INPUT.MAX_SIZE_TRAIN, - cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING, - ) - ] - if cfg.INPUT.CROP.ENABLED: - augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) - augs.append(T.RandomFlip()) - - # Assume always applies to the training set. - dataset_names = cfg.DATASETS.TRAIN - meta = MetadataCatalog.get(dataset_names[0]) - panoptic_target_generator = PanopticDeepLabTargetGenerator( - ignore_label=meta.ignore_label, - thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()), - sigma=cfg.INPUT.GAUSSIAN_SIGMA, - ignore_stuff_in_offset=cfg.INPUT.IGNORE_STUFF_IN_OFFSET, - small_instance_area=cfg.INPUT.SMALL_INSTANCE_AREA, - small_instance_weight=cfg.INPUT.SMALL_INSTANCE_WEIGHT, - ignore_crowd_in_semantic=cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC, - ) - - ret = { - "augmentations": augs, - "image_format": cfg.INPUT.FORMAT, - "panoptic_target_generator": panoptic_target_generator, - } - return ret - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # Load image. - image = utils.read_image(dataset_dict["file_name"], format=self.image_format) - utils.check_image_size(dataset_dict, image) - # Panoptic label is encoded in RGB image. - pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") - - # Reuses semantic transform for panoptic labels. - aug_input = T.AugInput(image, sem_seg=pan_seg_gt) - _ = self.augmentations(aug_input) - image, pan_seg_gt = aug_input.image, aug_input.sem_seg - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. 
- # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - # Generates training targets for Panoptic-DeepLab. - targets = self.panoptic_target_generator(rgb2id(pan_seg_gt), dataset_dict["segments_info"]) - dataset_dict.update(targets) - - return dataset_dict diff --git a/spaces/ccds/vits_onnx/export/vits/export_onnx.py b/spaces/ccds/vits_onnx/export/vits/export_onnx.py deleted file mode 100644 index 1834d24bc581ee43af038c2f56668d7a8bb1e425..0000000000000000000000000000000000000000 --- a/spaces/ccds/vits_onnx/export/vits/export_onnx.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) 2022, Yongqiang Li (yongqiangli@alumni.hust.edu.cn) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import json -import os -import sys - -import torch - -from models import SynthesizerTrn -import utils - -try: - import onnxruntime as ort -except ImportError: - print('Please install onnxruntime!') - sys.exit(1) - - -def to_numpy(tensor): - return tensor.detach().cpu().numpy() if tensor.requires_grad \ - else tensor.detach().numpy() - - -def get_args(): - parser = argparse.ArgumentParser(description='export onnx model') - parser.add_argument('--checkpoint', required=True, help='checkpoint') - parser.add_argument('--cfg', required=True, help='config file') - parser.add_argument('--onnx_model', required=True, help='onnx model name') - # parser.add_argument('--phone_table', - # required=True, - # help='input phone dict') - # parser.add_argument('--speaker_table', default=None, help='speaker table') - # parser.add_argument("--speaker_num", required=True, - # type=int, help="speaker num") - parser.add_argument( - '--providers', - required=False, - default='CPUExecutionProvider', - choices=['CUDAExecutionProvider', 'CPUExecutionProvider'], - help='the model to send request to') - args = parser.parse_args() - return args - - -def get_data_from_cfg(cfg_path: str): - assert os.path.isfile(cfg_path) - with open(cfg_path, 'r') as f: - data = json.load(f) - symbols = data["symbols"] - speaker_num = data["data"]["n_speakers"] - return len(symbols), speaker_num - - -def main(): - args = get_args() - os.environ['CUDA_VISIBLE_DEVICES'] = '0' - - hps = utils.get_hparams_from_file(args.cfg) - # with open(args.phone_table) as p_f: - # phone_num = len(p_f.readlines()) + 1 - # num_speakers = 1 - # if args.speaker_table is not None: - # num_speakers = len(open(args.speaker_table).readlines()) + 1 - phone_num, num_speakers = get_data_from_cfg(args.cfg) - net_g = SynthesizerTrn(phone_num, - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=num_speakers, - **hps.model) - utils.load_checkpoint(args.checkpoint, net_g, None) - net_g.forward = net_g.export_forward - net_g.eval() - - seq = torch.randint(low=0, high=phone_num, size=(1, 10), dtype=torch.long) - seq_len = torch.IntTensor([seq.size(1)]).long() - - # noise(可用于控制感情等变化程度) lenth(可用于控制整体语速) noisew(控制音素发音长度变化程度) - # 参考 
https://github.com/gbxh/genshinTTS - scales = torch.FloatTensor([0.667, 1.0, 0.8]) - # make triton dynamic shape happy - scales = scales.unsqueeze(0) - sid = torch.IntTensor([0]).long() - - dummy_input = (seq, seq_len, scales, sid) - torch.onnx.export(model=net_g, - args=dummy_input, - f=args.onnx_model, - input_names=['input', 'input_lengths', 'scales', 'sid'], - output_names=['output'], - dynamic_axes={ - 'input': { - 0: 'batch', - 1: 'phonemes' - }, - 'input_lengths': { - 0: 'batch' - }, - 'scales': { - 0: 'batch' - }, - 'sid': { - 0: 'batch' - }, - 'output': { - 0: 'batch', - 1: 'audio', - 2: 'audio_length' - } - }, - opset_version=13, - verbose=False) - - # Verify onnx precision - torch_output = net_g(seq, seq_len, scales, sid) - providers = [args.providers] - ort_sess = ort.InferenceSession(args.onnx_model, providers=providers) - ort_inputs = { - 'input': to_numpy(seq), - 'input_lengths': to_numpy(seq_len), - 'scales': to_numpy(scales), - 'sid': to_numpy(sid), - } - onnx_output = ort_sess.run(None, ort_inputs) - - -if __name__ == '__main__': - main() diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/plot_csv_file.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/plot_csv_file.py deleted file mode 100644 index 9a9ad9c670470e1f3231d90c7fd375566e2fb8ee..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/plot_csv_file.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import csv -from collections import defaultdict -from dataclasses import dataclass, field -from typing import List, Optional - -import matplotlib.pyplot as plt -import numpy as np -from matplotlib.ticker import ScalarFormatter - -from transformers import HfArgumentParser - - -def list_field(default=None, metadata=None): - return field(default_factory=lambda: default, metadata=metadata) - - -@dataclass -class PlotArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - csv_file: str = field( - metadata={"help": "The csv file to plot."}, - ) - plot_along_batch: bool = field( - default=False, - metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, - ) - is_time: bool = field( - default=False, - metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, - ) - no_log_scale: bool = field( - default=False, - metadata={"help": "Disable logarithmic scale when plotting"}, - ) - is_train: bool = field( - default=False, - metadata={ - "help": "Whether the csv file has training results or inference results. Defaults to inference results." - }, - ) - figure_png_file: Optional[str] = field( - default=None, - metadata={"help": "Filename under which the plot will be saved. 
If unused no plot is saved."}, - ) - short_model_names: Optional[List[str]] = list_field( - default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."} - ) - - -def can_convert_to_int(string): - try: - int(string) - return True - except ValueError: - return False - - -def can_convert_to_float(string): - try: - float(string) - return True - except ValueError: - return False - - -class Plot: - def __init__(self, args): - self.args = args - self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}}) - - with open(self.args.csv_file, newline="") as csv_file: - reader = csv.DictReader(csv_file) - for row in reader: - model_name = row["model"] - self.result_dict[model_name]["bsz"].append(int(row["batch_size"])) - self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"])) - if can_convert_to_int(row["result"]): - # value is not None - self.result_dict[model_name]["result"][ - (int(row["batch_size"]), int(row["sequence_length"])) - ] = int(row["result"]) - elif can_convert_to_float(row["result"]): - # value is not None - self.result_dict[model_name]["result"][ - (int(row["batch_size"]), int(row["sequence_length"])) - ] = float(row["result"]) - - def plot(self): - fig, ax = plt.subplots() - title_str = "Time usage" if self.args.is_time else "Memory usage" - title_str = title_str + " for training" if self.args.is_train else title_str + " for inference" - - if not self.args.no_log_scale: - # set logarithm scales - ax.set_xscale("log") - ax.set_yscale("log") - - for axis in [ax.xaxis, ax.yaxis]: - axis.set_major_formatter(ScalarFormatter()) - - for model_name_idx, model_name in enumerate(self.result_dict.keys()): - batch_sizes = sorted(set(self.result_dict[model_name]["bsz"])) - sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"])) - results = self.result_dict[model_name]["result"] - - (x_axis_array, inner_loop_array) = ( - (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) - ) - - label_model_name = ( - model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] - ) - - for inner_loop_value in inner_loop_array: - if self.args.plot_along_batch: - y_axis_array = np.asarray( - [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], - dtype=int, - ) - else: - y_axis_array = np.asarray( - [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], - dtype=np.float32, - ) - - (x_axis_label, inner_loop_label) = ( - ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz") - ) - - x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)] - plt.scatter( - x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" - ) - plt.plot(x_axis_array, y_axis_array, "--") - - title_str += f" {label_model_name} vs." 
- - title_str = title_str[:-4] - y_axis_label = "Time in s" if self.args.is_time else "Memory in MB" - - # plot - plt.title(title_str) - plt.xlabel(x_axis_label) - plt.ylabel(y_axis_label) - plt.legend() - - if self.args.figure_png_file is not None: - plt.savefig(self.args.figure_png_file) - else: - plt.show() - - -def main(): - parser = HfArgumentParser(PlotArguments) - plot_args = parser.parse_args_into_dataclasses()[0] - plot = Plot(args=plot_args) - plot.plot() - - -if __name__ == "__main__": - main() diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/luke/luke_utils.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/luke/luke_utils.py deleted file mode 100644 index aec4133f21b36eee313a5c6371ff48537ccf613c..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/luke/luke_utils.py +++ /dev/null @@ -1,115 +0,0 @@ -import unicodedata -from dataclasses import dataclass -from typing import Optional, Union - -import numpy as np - -from transformers.data.data_collator import DataCollatorMixin -from transformers.file_utils import PaddingStrategy -from transformers.tokenization_utils_base import PreTrainedTokenizerBase - - -def padding_tensor(sequences, padding_value, padding_side, sequence_length): - if isinstance(padding_value, tuple): - out_tensor = np.full((len(sequences), sequence_length, 2), padding_value) - else: - out_tensor = np.full((len(sequences), sequence_length), padding_value) - - for i, tensor in enumerate(sequences): - if padding_side == "right": - if isinstance(padding_value, tuple): - out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length] - else: - out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length] - else: - if isinstance(padding_value, tuple): - out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length] - else: - out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length] - - return out_tensor.tolist() - - -def is_punctuation(char): - cp = ord(char) - if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False - - -@dataclass -class DataCollatorForLukeTokenClassification(DataCollatorMixin): - """ - Data collator that will dynamically pad the inputs received, as well as the labels. - - Args: - tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): - The tokenizer used for encoding the data. - padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - - - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see above). - pad_to_multiple_of (`int`, *optional*): - If set will pad the sequence to a multiple of the provided value. 
- - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - label_pad_token_id (`int`, *optional*, defaults to -100): - The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). - return_tensors (`str`): - The type of Tensor to return. Allowable values are "np", "pt" and "tf". - """ - - tokenizer: PreTrainedTokenizerBase - padding: Union[bool, str, PaddingStrategy] = True - max_length: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - label_pad_token_id: int = -100 - return_tensors: str = "pt" - - def torch_call(self, features): - import torch - - label_name = "label" if "label" in features[0].keys() else "labels" - labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None - batch = self.tokenizer.pad( - features, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - # Conversion to tensors will fail if we have labels as they are not of the same length yet. - return_tensors="pt" if labels is None else None, - ) - - if labels is None: - return batch - - sequence_length = torch.tensor(batch["entity_ids"]).shape[1] - padding_side = self.tokenizer.padding_side - if padding_side == "right": - batch[label_name] = [ - list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels - ] - else: - batch[label_name] = [ - [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels - ] - - ner_tags = [feature["ner_tags"] for feature in features] - batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length) - original_entity_spans = [feature["original_entity_spans"] for feature in features] - batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length) - batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()} - - return batch diff --git a/spaces/chewing/liandan/app.py b/spaces/chewing/liandan/app.py deleted file mode 100644 index 76968563528bb6d0ce3c424b833ca0800638d518..0000000000000000000000000000000000000000 --- a/spaces/chewing/liandan/app.py +++ /dev/null @@ -1,135 +0,0 @@ -import gradio as gr -from src.check_backpack import sort_yaocai -from src.gr_func import init,get_medicines,get_first_material,get_second_material,get_possible_material,get_basename - -medicine_list_init = init() - -def medicine_select_acc_change_b(medicine_select_acc): - medicine_list = get_medicines(medicine_select_acc) - return gr.Dropdown.update(choices=["无"]+medicine_list,value=medicine_list[0]) - -def check_backpack(text,medicine_select,material_num): - yaocai_list = sort_yaocai(text,medicine_select,material_num) - rtn = [[] for _ in range(9)] - for name,grade,num,flag in yaocai_list: - rtn[grade-1].append((f"{name}*{num}",flag)) - rtn = list(map(lambda x:gr.HighlightedText.update(value=x,visible=len(x)!=0),rtn)) - return rtn[-1],rtn[-2],rtn[-3],rtn[-4],rtn[-5],rtn[-6],rtn[-7],rtn[-8],rtn[-9] - -def medicine_select_acc_change(medicine_select_acc): - medicine_list = get_medicines(medicine_select_acc) - return gr.Dropdown.update(choices=medicine_list,value=medicine_list[0]) - -def run_btn_click(medicine_select,material_1_select,material_2_select): - rtn = medicine_select+"\n" - possible_material_list = get_possible_material(medicine_select,material_1_select,material_2_select) - if len(possible_material_list) == 1: - main_material, auxi_material, 
material_third_list = possible_material_list[0] - rtn += f"""### -- **主药**:{main_material} -- **辅药**:{auxi_material} -""" - peifang = f"配方:主药{get_basename(main_material)}药引{get_basename(material_third_list[0])}辅药{get_basename(auxi_material)}丹炉陨铁炉" - return rtn,gr.Radio.update(choices=material_third_list,value=material_third_list[0],visible=True),gr.Markdown.update(visible=True,value=peifang),(main_material,auxi_material) - # else: - - for index,(main_material,auxi_material,material_third_list) in enumerate(possible_material_list): - rtn += f"""### 选择{index+1} -- **主药**:{main_material} -- **药引**:{",".join(material_third_list)} -- **辅药**:{auxi_material} -""" - return rtn,gr.Radio.update(visible=False),gr.Markdown.update(visible=False,value=""),(main_material,auxi_material) - -def medicine_select_change(medicine_select): - a = get_first_material(medicine_select) - return gr.Dropdown.update(choices=["无"]+a,value="无",visible=True),gr.Dropdown.update(visible=True,value="ALL"),gr.Number.update(visible=True,value=16) - -def material_1_grade_select_change(medicine_select,material_1_grade_select,material_1_num): - a = get_first_material(medicine_select,material_1_grade_select,material_1_num) - return gr.Dropdown.update(choices=["无"]+a, value="无", visible=True) - -def material_1_select_change(medicine_select,material_1_select): - if material_1_select!="无": - a = get_second_material(medicine_select,material_1_select) - return gr.Dropdown.update(choices=["无"] + a, value="无", visible=True), gr.Dropdown.update(visible=True,value="ALL"), gr.Number.update(visible=True, value=16) - else: - return gr.Dropdown.update(choices=["无"], value="无", visible=False), gr.Dropdown.update(visible=False,value="ALL"), gr.Number.update(visible=False, value=16) - - -def material_2_grade_select_change(medicine_select,material_1_select,material_2_grade_select,material_2_num): - a = get_second_material(medicine_select,material_1_select,material_2_grade_select,material_2_num) - return gr.Dropdown.update(choices=["无"]+a, value="无", visible=True) - -def output_Radio_change(output_state,output_Radio): - main_material, auxi_material = output_state - return f"配方:主药{get_basename(main_material)}药引{get_basename(output_Radio)}辅药{get_basename(auxi_material)}丹炉陨铁炉" - -with gr.Blocks() as demo: - with gr.Tab("丹药配方"): - gr.Markdown("选择你要炼制的丹药") - with gr.Row(): - with gr.Column(): - with gr.Accordion("丹药限制",open=False): - medicine_select_acc = gr.Radio(["ALL","回复状态", "突破概率", "加攻击力"],value="ALL",show_label=False) - medicine_select = gr.Dropdown(choices=medicine_list_init,value=medicine_list_init[0],label="丹药选择") - - with gr.Row(): - material_1_grade_select = gr.Dropdown(choices=["ALL"]+[f"{i}品药材" for i in "一二三四五六七八九"],value="ALL",visible=False,label="药材等级") - material_1_num = gr.Number(value=16,label="最大数量",visible=False) - material_1_select = gr.Dropdown(visible=False,label="第一个药材") - - with gr.Row(): - material_2_grade_select = gr.Dropdown(choices=["ALL"]+[f"{i}品药材" for i in "一二三四五六七八九"],value="ALL",visible=False,label="药材等级") - material_2_num = gr.Number(value=16,label="最大数量",visible=False) - material_2_select = gr.Dropdown(visible=False,label="第二个药材") - - run_btn = gr.Button("Run") - with gr.Column(): - output_mk = gr.Markdown("输出结果") - with gr.Blocks(): - output_Radio = gr.Radio(visible=False,label="药引") - output_state = gr.State((None,None)) - output_end = gr.Markdown(visible=False) - - with gr.Tab("背包查询"): - gr.Markdown("复制全部药材到左边文本框") - with gr.Row(): - with gr.Column(): - with gr.Accordion("丹药限制", open=False): - medicine_select_acc_b 
= gr.Radio(["ALL", "回复状态", "突破概率", "加攻击力"], value="ALL", - show_label=False) - with gr.Row(): - medicine_select_b = gr.Dropdown(choices=["无"] + medicine_list_init, value="无", - label="丹药选择") - material_num_b = gr.Number(value=16, label="最大数量") - inp_b = gr.Text(label="药材", lines=10) - run_btn_b = gr.Button("run") - with gr.Column(): - gr.Markdown("标注颜色的为炼制丹药需要材料,绿色为数量满足,黄色为缺少") - out_l = [ - gr.HighlightedText(label=f"{i}品药材", visible=False).style(color_map={"-": "yellow", "+": "green"}) - for i in "九八七六五四三二一"] - - medicine_select_acc.change(fn=medicine_select_acc_change, inputs=[medicine_select_acc], outputs=[medicine_select]) - medicine_select.change(fn=medicine_select_change,inputs=[medicine_select],outputs=[material_1_select,material_1_grade_select,material_1_num]) - - material_1_grade_select.change(fn=material_1_grade_select_change,inputs=[medicine_select,material_1_grade_select,material_1_num],outputs=[material_1_select]) - material_1_num.change(fn=material_1_grade_select_change,inputs=[medicine_select,material_1_grade_select,material_1_num],outputs=[material_1_select]) - - material_1_select.change(fn=material_1_select_change,inputs=[medicine_select,material_1_select],outputs=[material_2_select,material_2_grade_select,material_2_num]) - - material_2_grade_select.change(fn=material_2_grade_select_change,inputs=[medicine_select, material_1_select, material_2_grade_select, material_2_num],outputs=[material_2_select]) - material_2_num.change(fn=material_2_grade_select_change,inputs=[medicine_select, material_1_select, material_2_grade_select, material_2_num],outputs=[material_2_select]) - - run_btn.click(fn=run_btn_click,inputs=[medicine_select,material_1_select,material_2_select],outputs=[output_mk,output_Radio,output_end,output_state]) - - output_Radio.change(fn=output_Radio_change,inputs=[output_state,output_Radio],outputs=[output_end]) - - # 背包 - medicine_select_acc_b.change(fn=medicine_select_acc_change_b, inputs=[medicine_select_acc_b], - outputs=[medicine_select_b]) - - run_btn_b.click(fn=check_backpack, inputs=[inp_b, medicine_select_b, material_num_b], outputs=out_l) - -demo.launch() diff --git a/spaces/chilge/Fushimi/vdecoder/hifigan/env.py b/spaces/chilge/Fushimi/vdecoder/hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/vdecoder/hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/chilge/Fushimi/vdecoder/hifigan/models.py b/spaces/chilge/Fushimi/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = 
os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > 
self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/chronopt-research/ViTExCo/src/data/dataloader.py b/spaces/chronopt-research/ViTExCo/src/data/dataloader.py deleted file mode 100644 index 73512b6e53ffa0774d1526d6570150dd0918182f..0000000000000000000000000000000000000000 --- a/spaces/chronopt-research/ViTExCo/src/data/dataloader.py +++ /dev/null @@ -1,320 +0,0 @@ -import numpy as np -import pandas as pd -from src.utils import ( - CenterPadCrop_numpy, - Distortion_with_flow_cpu, - Distortion_with_flow_gpu, - Normalize, - RGB2Lab, - ToTensor, - Normalize, - RGB2Lab, - ToTensor, - CenterPad, - read_flow, - SquaredPadding, - SquaredPaddingFlow, - ResizeFlow -) -import torch -import torch.utils.data as data -import torchvision.transforms as transforms -from numpy import random -import os -from PIL import Image -from scipy.ndimage.filters import gaussian_filter -from scipy.ndimage import map_coordinates - - -def image_loader(path): - with open(path, "rb") as f: - with Image.open(f) as img: - return img.convert("RGB") - - -class CenterCrop(object): - """ - center crop the numpy array - """ - - def __init__(self, image_size): - self.h0, self.w0 = image_size - - def __call__(self, input_numpy): - if input_numpy.ndim == 3: - h, w, channel = input_numpy.shape - output_numpy = np.zeros((self.h0, self.w0, channel)) - output_numpy = input_numpy[ - (h - self.h0) // 2 : (h - self.h0) // 2 + self.h0, (w - self.w0) // 2 : (w - self.w0) // 2 + self.w0, : - ] - else: - h, w = input_numpy.shape - output_numpy = np.zeros((self.h0, self.w0)) - output_numpy = input_numpy[ - (h - self.h0) // 2 : (h - 
self.h0) // 2 + self.h0, (w - self.w0) // 2 : (w - self.w0) // 2 + self.w0 - ] - return output_numpy - - -class VideosDataset(torch.utils.data.Dataset): - def __init__( - self, - video_data_root, - flow_data_root, - mask_data_root, - imagenet_folder, - annotation_file_path, - image_size, - num_refs=5, # max = 20 - image_transform=None, - real_reference_probability=1, - nonzero_placeholder_probability=0.5, - ): - self.video_data_root = video_data_root - self.flow_data_root = flow_data_root - self.mask_data_root = mask_data_root - self.imagenet_folder = imagenet_folder - self.image_transform = image_transform - self.CenterPad = CenterPad(image_size) - self.ToTensor = ToTensor() - self.CenterCrop = transforms.CenterCrop(image_size) - self.SquaredPadding = SquaredPadding(image_size[0]) - self.SquaredPaddingFlow = SquaredPaddingFlow() - self.ResizeFlow = ResizeFlow(image_size) - self.num_refs = num_refs - - assert os.path.exists(self.video_data_root), "find no video dataroot" - assert os.path.exists(self.flow_data_root), "find no flow dataroot" - assert os.path.exists(self.imagenet_folder), "find no imagenet folder" - # self.epoch = epoch - self.image_pairs = pd.read_csv(annotation_file_path) - self.real_len = len(self.image_pairs) - # self.image_pairs = pd.concat([self.image_pairs] * self.epoch, ignore_index=True) - self.real_reference_probability = real_reference_probability - self.nonzero_placeholder_probability = nonzero_placeholder_probability - print("##### parsing image pairs in %s: %d pairs #####" % (video_data_root, self.__len__())) - - def __getitem__(self, index): - ( - video_name, - prev_frame, - current_frame, - flow_forward_name, - mask_name, - reference_1_name, - reference_2_name, - reference_3_name, - reference_4_name, - reference_5_name - ) = self.image_pairs.iloc[index, :5+self.num_refs].values.tolist() - - video_path = os.path.join(self.video_data_root, video_name) - flow_path = os.path.join(self.flow_data_root, video_name) - mask_path = os.path.join(self.mask_data_root, video_name) - - prev_frame_path = os.path.join(video_path, prev_frame) - current_frame_path = os.path.join(video_path, current_frame) - - reference_1_path = os.path.join(self.imagenet_folder, reference_1_name) - reference_2_path = os.path.join(self.imagenet_folder, reference_2_name) - reference_3_path = os.path.join(self.imagenet_folder, reference_3_name) - reference_4_path = os.path.join(self.imagenet_folder, reference_4_name) - reference_5_path = os.path.join(self.imagenet_folder, reference_5_name) - - flow_forward_path = os.path.join(flow_path, flow_forward_name) - mask_path = os.path.join(mask_path, mask_name) - - reference_gt_1_path = prev_frame_path - reference_gt_2_path = current_frame_path - try: - I1 = Image.open(prev_frame_path).convert("RGB") - I2 = Image.open(current_frame_path).convert("RGB") - - I_reference_video = Image.open(random.choice([reference_gt_1_path, reference_gt_2_path])).convert("RGB") - reference_path = random.choice( - [reference_1_path, reference_2_path, reference_3_path, reference_4_path, reference_5_path] - ) - I_reference_video_real = Image.open(reference_path).convert("RGB") - - flow_forward = read_flow(flow_forward_path) # numpy - - mask = Image.open(mask_path) # PIL - # binary mask - mask = np.array(mask) - mask[mask < 240] = 0 - mask[mask >= 240] = 1 - mask = self.ToTensor(mask) - - # transform - I1 = self.image_transform(I1) - I2 = self.image_transform(I2) - I_reference_video = self.image_transform(self.CenterPad(I_reference_video)) - I_reference_video_real = 
self.image_transform(self.CenterPad(I_reference_video_real)) - flow_forward = self.SquaredPaddingFlow(self.ResizeFlow(torch.tensor(flow_forward))) - - mask = self.SquaredPadding(mask, return_pil=False, return_paddings=False) - - if np.random.random() < self.real_reference_probability: - I_reference_output = I_reference_video_real # Use reference from imagenet - placeholder = torch.zeros_like(I1) - self_ref_flag = torch.zeros_like(I1) - else: - I_reference_output = I_reference_video # Use reference from ground truth - placeholder = I2 if np.random.random() < self.nonzero_placeholder_probability else torch.zeros_like(I1) - self_ref_flag = torch.ones_like(I1) - - outputs = [ - I1, - I2, - I_reference_output, - flow_forward, - mask, - placeholder, - self_ref_flag, - video_name + prev_frame, - video_name + current_frame, - reference_path - ] - - except Exception as e: - print("error in reading image pair: %s" % str(self.image_pairs[index])) - print(e) - return self.__getitem__(np.random.randint(0, len(self.image_pairs))) - return outputs - - def __len__(self): - return len(self.image_pairs) - - -def parse_imgnet_images(pairs_file): - pairs = [] - with open(pairs_file, "r") as f: - lines = f.readlines() - for line in lines: - line = line.strip().split("|") - image_a = line[0] - image_b = line[1] - pairs.append((image_a, image_b)) - return pairs - - -class VideosDataset_ImageNet(data.Dataset): - def __init__( - self, - imagenet_data_root, - pairs_file, - image_size, - transforms_imagenet=None, - distortion_level=3, - brightnessjitter=0, - nonzero_placeholder_probability=0.5, - extra_reference_transform=None, - real_reference_probability=1, - distortion_device='cpu' - ): - self.imagenet_data_root = imagenet_data_root - self.image_pairs = pd.read_csv(pairs_file, names=['i1', 'i2']) - self.transforms_imagenet_raw = transforms_imagenet - self.extra_reference_transform = transforms.Compose(extra_reference_transform) - self.real_reference_probability = real_reference_probability - self.transforms_imagenet = transforms.Compose(transforms_imagenet) - self.image_size = image_size - self.real_len = len(self.image_pairs) - self.distortion_level = distortion_level - self.distortion_transform = Distortion_with_flow_cpu() if distortion_device == 'cpu' else Distortion_with_flow_gpu() - self.brightnessjitter = brightnessjitter - self.flow_transform = transforms.Compose([CenterPadCrop_numpy(self.image_size), ToTensor()]) - self.nonzero_placeholder_probability = nonzero_placeholder_probability - self.ToTensor = ToTensor() - self.Normalize = Normalize() - print("##### parsing imageNet pairs in %s: %d pairs #####" % (imagenet_data_root, self.__len__())) - - def __getitem__(self, index): - pa, pb = self.image_pairs.iloc[index].values.tolist() - if np.random.random() > 0.5: - pa, pb = pb, pa - - image_a_path = os.path.join(self.imagenet_data_root, pa) - image_b_path = os.path.join(self.imagenet_data_root, pb) - - I1 = image_loader(image_a_path) - I2 = I1 - I_reference_video = I1 - I_reference_video_real = image_loader(image_b_path) - # print("i'm here get image 2") - # generate the flow - alpha = np.random.rand() * self.distortion_level - distortion_range = 50 - random_state = np.random.RandomState(None) - shape = self.image_size[0], self.image_size[1] - # dx: flow on the vertical direction; dy: flow on the horizontal direction - forward_dx = ( - gaussian_filter((random_state.rand(*shape) * 2 - 1), distortion_range, mode="constant", cval=0) * alpha * 1000 - ) - forward_dy = ( - 
gaussian_filter((random_state.rand(*shape) * 2 - 1), distortion_range, mode="constant", cval=0) * alpha * 1000 - ) - # print("i'm here get image 3") - for transform in self.transforms_imagenet_raw: - if type(transform) is RGB2Lab: - I1_raw = I1 - I1 = transform(I1) - for transform in self.transforms_imagenet_raw: - if type(transform) is RGB2Lab: - I2 = self.distortion_transform(I2, forward_dx, forward_dy) - I2_raw = I2 - I2 = transform(I2) - # print("i'm here get image 4") - I2[0:1, :, :] = I2[0:1, :, :] + torch.randn(1) * self.brightnessjitter - - I_reference_video = self.extra_reference_transform(I_reference_video) - for transform in self.transforms_imagenet_raw: - I_reference_video = transform(I_reference_video) - - I_reference_video_real = self.transforms_imagenet(I_reference_video_real) - # print("i'm here get image 5") - flow_forward_raw = np.stack((forward_dy, forward_dx), axis=-1) - flow_forward = self.flow_transform(flow_forward_raw) - - # update the mask for the pixels on the border - grid_x, grid_y = np.meshgrid(np.arange(self.image_size[0]), np.arange(self.image_size[1]), indexing="ij") - grid = np.stack((grid_y, grid_x), axis=-1) - grid_warp = grid + flow_forward_raw - location_y = grid_warp[:, :, 0].flatten() - location_x = grid_warp[:, :, 1].flatten() - I2_raw = np.array(I2_raw).astype(float) - I21_r = map_coordinates(I2_raw[:, :, 0], np.stack((location_x, location_y)), cval=-1).reshape( - (self.image_size[0], self.image_size[1]) - ) - I21_g = map_coordinates(I2_raw[:, :, 1], np.stack((location_x, location_y)), cval=-1).reshape( - (self.image_size[0], self.image_size[1]) - ) - I21_b = map_coordinates(I2_raw[:, :, 2], np.stack((location_x, location_y)), cval=-1).reshape( - (self.image_size[0], self.image_size[1]) - ) - I21_raw = np.stack((I21_r, I21_g, I21_b), axis=2) - mask = np.ones((self.image_size[0], self.image_size[1])) - mask[(I21_raw[:, :, 0] == -1) & (I21_raw[:, :, 1] == -1) & (I21_raw[:, :, 2] == -1)] = 0 - mask[abs(I21_raw - I1_raw).sum(axis=-1) > 50] = 0 - mask = self.ToTensor(mask) - # print("i'm here get image 6") - if np.random.random() < self.real_reference_probability: - I_reference_output = I_reference_video_real - placeholder = torch.zeros_like(I1) - self_ref_flag = torch.zeros_like(I1) - else: - I_reference_output = I_reference_video - placeholder = I2 if np.random.random() < self.nonzero_placeholder_probability else torch.zeros_like(I1) - self_ref_flag = torch.ones_like(I1) - - # except Exception as e: - # if combo_path is not None: - # print("problem in ", combo_path) - # print("problem in, ", image_a_path) - # print(e) - # return self.__getitem__(np.random.randint(0, len(self.image_pairs))) - # print("i'm here get image 7") - return [I1, I2, I_reference_output, flow_forward, mask, placeholder, self_ref_flag, "holder", pb, pa] - - def __len__(self): - return len(self.image_pairs) \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/gpu_wrappers.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/gpu_wrappers.py deleted file mode 100644 index 24c24fac397cc89aed2271f87e0d134a12182fff..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/gpu_wrappers.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -# @nolint - -# not linting this file because it imports * from swigfaiss, which -# causes a ton of useless warnings. - -import numpy as np - -from faiss.loader import * - - -########################################### -# GPU functions -########################################### - - -def index_cpu_to_gpu_multiple_py(resources, index, co=None, gpus=None): - """ builds the C++ vectors for the GPU indices and the - resources. Handles the case where the resources are assigned to - the list of GPUs """ - if gpus is None: - gpus = range(len(resources)) - vres = GpuResourcesVector() - vdev = Int32Vector() - for i, res in zip(gpus, resources): - vdev.push_back(i) - vres.push_back(res) - index = index_cpu_to_gpu_multiple(vres, vdev, index, co) - return index - - -def index_cpu_to_all_gpus(index, co=None, ngpu=-1): - index_gpu = index_cpu_to_gpus_list(index, co=co, gpus=None, ngpu=ngpu) - return index_gpu - - -def index_cpu_to_gpus_list(index, co=None, gpus=None, ngpu=-1): - """ Here we can pass list of GPU ids as a parameter or ngpu to - use first n GPU's. gpus mut be a list or None. - co is a GpuMultipleClonerOptions - """ - if (gpus is None) and (ngpu == -1): # All blank - gpus = range(get_num_gpus()) - elif (gpus is None) and (ngpu != -1): # Get number of GPU's only - gpus = range(ngpu) - res = [StandardGpuResources() for _ in gpus] - index_gpu = index_cpu_to_gpu_multiple_py(res, index, co, gpus) - return index_gpu - -# allows numpy ndarray usage with bfKnn - - -def knn_gpu(res, xq, xb, k, D=None, I=None, metric=METRIC_L2, device=-1): - """ - Compute the k nearest neighbors of a vector on one GPU without constructing an index - - Parameters - ---------- - res : StandardGpuResources - GPU resources to use during computation - xq : array_like - Query vectors, shape (nq, d) where d is appropriate for the index. - `dtype` must be float32. - xb : array_like - Database vectors, shape (nb, d) where d is appropriate for the index. - `dtype` must be float32. - k : int - Number of nearest neighbors. - D : array_like, optional - Output array for distances of the nearest neighbors, shape (nq, k) - I : array_like, optional - Output array for the nearest neighbors, shape (nq, k) - metric : MetricType, optional - Distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT) - device: int, optional - Which CUDA device in the system to run the search on. 
-1 indicates that - the current thread-local device state (via cudaGetDevice) should be used - (can also be set via torch.cuda.set_device in PyTorch) - Otherwise, an integer 0 <= device < numDevices indicates the GPU on which - the computation should be run - - Returns - ------- - D : array_like - Distances of the nearest neighbors, shape (nq, k) - I : array_like - Labels of the nearest neighbors, shape (nq, k) - """ - nq, d = xq.shape - if xq.flags.c_contiguous: - xq_row_major = True - elif xq.flags.f_contiguous: - xq = xq.T - xq_row_major = False - else: - xq = np.ascontiguousarray(xq, dtype='float32') - xq_row_major = True - - xq_ptr = swig_ptr(xq) - - if xq.dtype == np.float32: - xq_type = DistanceDataType_F32 - elif xq.dtype == np.float16: - xq_type = DistanceDataType_F16 - else: - raise TypeError('xq must be f32 or f16') - - nb, d2 = xb.shape - assert d2 == d - if xb.flags.c_contiguous: - xb_row_major = True - elif xb.flags.f_contiguous: - xb = xb.T - xb_row_major = False - else: - xb = np.ascontiguousarray(xb, dtype='float32') - xb_row_major = True - - xb_ptr = swig_ptr(xb) - - if xb.dtype == np.float32: - xb_type = DistanceDataType_F32 - elif xb.dtype == np.float16: - xb_type = DistanceDataType_F16 - else: - raise TypeError('xb must be float32 or float16') - - if D is None: - D = np.empty((nq, k), dtype=np.float32) - else: - assert D.shape == (nq, k) - # interface takes void*, we need to check this - assert D.dtype == np.float32 - - D_ptr = swig_ptr(D) - - if I is None: - I = np.empty((nq, k), dtype=np.int64) - else: - assert I.shape == (nq, k) - - I_ptr = swig_ptr(I) - - if I.dtype == np.int64: - I_type = IndicesDataType_I64 - elif I.dtype == I.dtype == np.int32: - I_type = IndicesDataType_I32 - else: - raise TypeError('I must be i64 or i32') - - args = GpuDistanceParams() - args.metric = metric - args.k = k - args.dims = d - args.vectors = xb_ptr - args.vectorsRowMajor = xb_row_major - args.vectorType = xb_type - args.numVectors = nb - args.queries = xq_ptr - args.queriesRowMajor = xq_row_major - args.queryType = xq_type - args.numQueries = nq - args.outDistances = D_ptr - args.outIndices = I_ptr - args.outIndicesType = I_type - args.device = device - - # no stream synchronization needed, inputs and outputs are guaranteed to - # be on the CPU (numpy arrays) - bfKnn(res, args) - - return D, I - -# allows numpy ndarray usage with bfKnn for all pairwise distances - - -def pairwise_distance_gpu(res, xq, xb, D=None, metric=METRIC_L2, device=-1): - """ - Compute all pairwise distances between xq and xb on one GPU without constructing an index - - Parameters - ---------- - res : StandardGpuResources - GPU resources to use during computation - xq : array_like - Query vectors, shape (nq, d) where d is appropriate for the index. - `dtype` must be float32. - xb : array_like - Database vectors, shape (nb, d) where d is appropriate for the index. - `dtype` must be float32. - D : array_like, optional - Output array for all pairwise distances, shape (nq, nb) - metric : MetricType, optional - Distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT) - device: int, optional - Which CUDA device in the system to run the search on. 
-1 indicates that - the current thread-local device state (via cudaGetDevice) should be used - (can also be set via torch.cuda.set_device in PyTorch) - Otherwise, an integer 0 <= device < numDevices indicates the GPU on which - the computation should be run - - Returns - ------- - D : array_like - All pairwise distances, shape (nq, nb) - """ - nq, d = xq.shape - if xq.flags.c_contiguous: - xq_row_major = True - elif xq.flags.f_contiguous: - xq = xq.T - xq_row_major = False - else: - raise TypeError( - 'xq matrix should be row (C) or column-major (Fortran)') - - xq_ptr = swig_ptr(xq) - - if xq.dtype == np.float32: - xq_type = DistanceDataType_F32 - elif xq.dtype == np.float16: - xq_type = DistanceDataType_F16 - else: - xq = np.ascontiguousarray(xb, dtype='float32') - xq_row_major = True - - nb, d2 = xb.shape - assert d2 == d - if xb.flags.c_contiguous: - xb_row_major = True - elif xb.flags.f_contiguous: - xb = xb.T - xb_row_major = False - else: - xb = np.ascontiguousarray(xb, dtype='float32') - xb_row_major = True - - xb_ptr = swig_ptr(xb) - - if xb.dtype == np.float32: - xb_type = DistanceDataType_F32 - elif xb.dtype == np.float16: - xb_type = DistanceDataType_F16 - else: - raise TypeError('xb must be float32 or float16') - - if D is None: - D = np.empty((nq, nb), dtype=np.float32) - else: - assert D.shape == (nq, nb) - # interface takes void*, we need to check this - assert D.dtype == np.float32 - - D_ptr = swig_ptr(D) - - args = GpuDistanceParams() - args.metric = metric - args.k = -1 # selects all pairwise distances - args.dims = d - args.vectors = xb_ptr - args.vectorsRowMajor = xb_row_major - args.vectorType = xb_type - args.numVectors = nb - args.queries = xq_ptr - args.queriesRowMajor = xq_row_major - args.queryType = xq_type - args.numQueries = nq - args.outDistances = D_ptr - args.device = device - - # no stream synchronization needed, inputs and outputs are guaranteed to - # be on the CPU (numpy arrays) - bfKnn(res, args) - - return D diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/strings.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/strings.py deleted file mode 100644 index d85bc052969438e1e05dbf3abd9c75c8effc7d03..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/strings.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import threading -from typing import Dict - -import requests - -from gradio import wasm_utils - -MESSAGING_API_ENDPOINT = "https://api.gradio.app/gradio-messaging/en" - -en = { - "RUNNING_LOCALLY": "Running on local URL: {}", - "RUNNING_LOCALLY_SEPARATED": "Running on local URL: {}://{}:{}", - "SHARE_LINK_DISPLAY": "Running on public URL: {}", - "COULD_NOT_GET_SHARE_LINK": "\nCould not create share link. Please check your internet connection or our status page: https://status.gradio.app.", - "COULD_NOT_GET_SHARE_LINK_MISSING_FILE": "\nCould not create share link. Missing file: {}. \n\nPlease check your internet connection. This can happen if your antivirus software blocks the download of this file. You can install manually by following these steps: \n\n1. Download this file: {}\n2. Rename the downloaded file to: {}\n3. 
Move the file to this location: {}", - "COLAB_NO_LOCAL": "Cannot display local interface on google colab, public link created.", - "PUBLIC_SHARE_TRUE": "\nTo create a public link, set `share=True` in `launch()`.", - "MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {} (may take up to a minute for link to be usable)", - "GENERATING_PUBLIC_LINK": "Generating public link (may take a few seconds...):", - "BETA_INVITE": "\nThanks for being a Gradio user! If you have questions or feedback, please join our Discord server and chat with us: https://discord.gg/feTf9x3ZSB", - "COLAB_DEBUG_TRUE": "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. " - "To turn off, set debug=False in launch().", - "COLAB_DEBUG_FALSE": "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()", - "COLAB_WARNING": "Note: opening Chrome Inspector may crash demo inside Colab notebooks.", - "SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)", - "INLINE_DISPLAY_BELOW": "Interface loading below...", - "TIPS": [ - "You can add authentication to your app with the `auth=` kwarg in the `launch()` command; for example: `gr.Interface(...).launch(auth=('username', 'password'))`", - "Let users specify why they flagged input with the `flagging_options=` kwarg; for example: `gr.Interface(..., flagging_options=['too slow', 'incorrect output', 'other'])`", - "You can show or hide the button for flagging with the `allow_flagging=` kwarg; for example: gr.Interface(..., allow_flagging=False)", - "The inputs and outputs flagged by the users are stored in the flagging directory, specified by the flagging_dir= kwarg. You can view this data through the interface by setting the examples= kwarg to the flagging directory; for example gr.Interface(..., examples='flagged')", - "You can add a title and description to your interface using the `title=` and `description=` kwargs. The `article=` kwarg can be used to add a description under the interface; for example gr.Interface(..., title='My app', description='Lorem ipsum'). Try using Markdown!", - "For a classification or regression model, set `interpretation='default'` to see why the model made a prediction.", - ], -} - - -def get_updated_messaging(en: Dict): - try: - updated_messaging = requests.get(MESSAGING_API_ENDPOINT, timeout=3).json() - en.update(updated_messaging) - except Exception: # Use default messaging - pass - - -if os.getenv("GRADIO_ANALYTICS_ENABLED", "True") == "True" and not wasm_utils.IS_WASM: - threading.Thread(target=get_updated_messaging, args=(en,)).start() diff --git a/spaces/chungsarit/ytdownload/Dockerfile b/spaces/chungsarit/ytdownload/Dockerfile deleted file mode 100644 index 40aac2f69d6e33d85ae8dd3b0932a4507b6afceb..0000000000000000000000000000000000000000 --- a/spaces/chungsarit/ytdownload/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM jupyter/base-notebook:latest - -RUN fix-permissions "${CONDA_DIR}" && \ - fix-permissions "/home/${NB_USER}" - -COPY requirements.txt . 
-RUN pip install -r requirements.txt - -COPY /ytdownloader.py ./ytdownloader.py -COPY /theme.vue ./theme.vue - -ENV PROJ_LIB='/opt/conda/share/proj' -USER root -RUN chown -R ${NB_UID} ${HOME} -USER ${NB_USER} - -EXPOSE 8765 - -CMD ["solara", "run", "./ytdownloader.py", "--host=0.0.0.0"] \ No newline at end of file diff --git a/spaces/chuxiaojie/NAFNet/README.md b/spaces/chuxiaojie/NAFNet/README.md deleted file mode 100644 index 51c016d9592628fd229c9bf7d3a5b3585452de5d..0000000000000000000000000000000000000000 --- a/spaces/chuxiaojie/NAFNet/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: NAFNet -emoji: 🌖 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/cihyFjudo/fairness-paper-search/MechWarrior 4 Black Knight Expansion Full Movie in Italian HD 720p Watch Online or Download Now.md b/spaces/cihyFjudo/fairness-paper-search/MechWarrior 4 Black Knight Expansion Full Movie in Italian HD 720p Watch Online or Download Now.md deleted file mode 100644 index ae30df8478aa01f95e454c2d61b7538037a1b882..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/MechWarrior 4 Black Knight Expansion Full Movie in Italian HD 720p Watch Online or Download Now.md +++ /dev/null @@ -1,6 +0,0 @@ -

    shogun 2 fots multiplayer crack for cod


Download Zip: https://tinurli.com/2uwk0M



    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Szenio Tablet Pc 2000 Firmware D How to Check and Verify the Firmware Version on Your Device.md b/spaces/cihyFjudo/fairness-paper-search/Szenio Tablet Pc 2000 Firmware D How to Check and Verify the Firmware Version on Your Device.md deleted file mode 100644 index cdc04b0667f7f203ad6c48d76d48a4ce3a2998cc..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Szenio Tablet Pc 2000 Firmware D How to Check and Verify the Firmware Version on Your Device.md +++ /dev/null @@ -1,8 +0,0 @@ -
    -

Download and update firmware for these products: the Szenio PC 2000, Szenio Tablet PC 2000, Szenio Tablet PC 2500, and Szenio Tablet PC 2016 DC.

    -

How to download and update the Szenio Tablet PC 2500 firmware: downloading the ZTE firmware update for your gadget is very important, as many of its owners regularly make upgrades in order to keep their gadgets functioning ...

    -

    Szenio Tablet Pc 2000 Firmware D


Download: https://tinurli.com/2uwhF3



    -

How to download and update the Szenio Tablet PC 2500 firmware. Samsung firmware: being one of the leading producers of electronic gadgets, including mobile phones, Samsung very often releases stock firmware ROMs ...

    -

How to download and update the Szenio Tablet PC 2500 firmware. Digital photo camera firmware: most of a camera's internal parts, including the lenses, autofocus, LCD screen, etc., are controlled by ...

    -
    -
    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageQt.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageQt.py deleted file mode 100644 index 9b7245454dfcccb4e822a6634168d405c0e791bb..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageQt.py +++ /dev/null @@ -1,216 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# a simple Qt image interface. -# -# history: -# 2006-06-03 fl: created -# 2006-06-04 fl: inherit from QImage instead of wrapping it -# 2006-06-05 fl: removed toimage helper; move string support to ImageQt -# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) -# -# Copyright (c) 2006 by Secret Labs AB -# Copyright (c) 2006 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import sys -from io import BytesIO - -from . import Image -from ._util import is_path - -qt_versions = [ - ["6", "PyQt6"], - ["side6", "PySide6"], -] - -# If a version has already been imported, attempt it first -qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) -for qt_version, qt_module in qt_versions: - try: - if qt_module == "PyQt6": - from PyQt6.QtCore import QBuffer, QIODevice - from PyQt6.QtGui import QImage, QPixmap, qRgba - elif qt_module == "PySide6": - from PySide6.QtCore import QBuffer, QIODevice - from PySide6.QtGui import QImage, QPixmap, qRgba - except (ImportError, RuntimeError): - continue - qt_is_installed = True - break -else: - qt_is_installed = False - qt_version = None - - -def rgb(r, g, b, a=255): - """(Internal) Turns an RGB color into a Qt compatible color integer.""" - # use qRgb to pack the colors, and then turn the resulting long - # into a negative integer with the same bitpattern. - return qRgba(r, g, b, a) & 0xFFFFFFFF - - -def fromqimage(im): - """ - :param im: QImage or PIL ImageQt object - """ - buffer = QBuffer() - if qt_version == "6": - try: - qt_openmode = QIODevice.OpenModeFlag - except AttributeError: - qt_openmode = QIODevice.OpenMode - else: - qt_openmode = QIODevice - buffer.open(qt_openmode.ReadWrite) - # preserve alpha channel with png - # otherwise ppm is more friendly with Image.open - if im.hasAlphaChannel(): - im.save(buffer, "png") - else: - im.save(buffer, "ppm") - - b = BytesIO() - b.write(buffer.data()) - buffer.close() - b.seek(0) - - return Image.open(b) - - -def fromqpixmap(im): - return fromqimage(im) - # buffer = QBuffer() - # buffer.open(QIODevice.ReadWrite) - # # im.save(buffer) - # # What if png doesn't support some image features like animation? 
- # im.save(buffer, 'ppm') - # bytes_io = BytesIO() - # bytes_io.write(buffer.data()) - # buffer.close() - # bytes_io.seek(0) - # return Image.open(bytes_io) - - -def align8to32(bytes, width, mode): - """ - converts each scanline of data from 8 bit to 32 bit aligned - """ - - bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode] - - # calculate bytes per line and the extra padding if needed - bits_per_line = bits_per_pixel * width - full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) - bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) - - extra_padding = -bytes_per_line % 4 - - # already 32 bit aligned by luck - if not extra_padding: - return bytes - - new_data = [] - for i in range(len(bytes) // bytes_per_line): - new_data.append( - bytes[i * bytes_per_line : (i + 1) * bytes_per_line] - + b"\x00" * extra_padding - ) - - return b"".join(new_data) - - -def _toqclass_helper(im): - data = None - colortable = None - exclusive_fp = False - - # handle filename, if given instead of image name - if hasattr(im, "toUtf8"): - # FIXME - is this really the best way to do this? - im = str(im.toUtf8(), "utf-8") - if is_path(im): - im = Image.open(im) - exclusive_fp = True - - qt_format = QImage.Format if qt_version == "6" else QImage - if im.mode == "1": - format = qt_format.Format_Mono - elif im.mode == "L": - format = qt_format.Format_Indexed8 - colortable = [] - for i in range(256): - colortable.append(rgb(i, i, i)) - elif im.mode == "P": - format = qt_format.Format_Indexed8 - colortable = [] - palette = im.getpalette() - for i in range(0, len(palette), 3): - colortable.append(rgb(*palette[i : i + 3])) - elif im.mode == "RGB": - # Populate the 4th channel with 255 - im = im.convert("RGBA") - - data = im.tobytes("raw", "BGRA") - format = qt_format.Format_RGB32 - elif im.mode == "RGBA": - data = im.tobytes("raw", "BGRA") - format = qt_format.Format_ARGB32 - elif im.mode == "I;16" and hasattr(qt_format, "Format_Grayscale16"): # Qt 5.13+ - im = im.point(lambda i: i * 256) - - format = qt_format.Format_Grayscale16 - else: - if exclusive_fp: - im.close() - msg = f"unsupported image mode {repr(im.mode)}" - raise ValueError(msg) - - size = im.size - __data = data or align8to32(im.tobytes(), size[0], im.mode) - if exclusive_fp: - im.close() - return {"data": __data, "size": size, "format": format, "colortable": colortable} - - -if qt_is_installed: - - class ImageQt(QImage): - def __init__(self, im): - """ - An PIL image wrapper for Qt. This is a subclass of PyQt's QImage - class. - - :param im: A PIL Image object, or a file name (given either as - Python string or a PyQt string object). - """ - im_data = _toqclass_helper(im) - # must keep a reference, or Qt will crash! - # All QImage constructors that take data operate on an existing - # buffer, so this buffer has to hang on for the life of the image. - # Fixes https://github.com/python-pillow/Pillow/issues/1370 - self.__data = im_data["data"] - super().__init__( - self.__data, - im_data["size"][0], - im_data["size"][1], - im_data["format"], - ) - if im_data["colortable"]: - self.setColorTable(im_data["colortable"]) - - -def toqimage(im): - return ImageQt(im) - - -def toqpixmap(im): - # # This doesn't work. For now using a dumb approach. 
- # im_data = _toqclass_helper(im) - # result = QPixmap(im_data["size"][0], im_data["size"][1]) - # result.loadFromData(im_data["data"]) - qimage = toqimage(im) - return QPixmap.fromImage(qimage) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_common.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_common.py deleted file mode 100644 index e6ac11831522b266114d5b68ee1da298e3aeb14a..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # This is the pre-python 3.6 fold situation - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data. - - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. 
- - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. - """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. - """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. 
- dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. - dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. 
- - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/spaces/congsaPfin/Manga-OCR/logs/3 Patti Vungo 2021 The Ultimate Guide to Download Install and Play.md b/spaces/congsaPfin/Manga-OCR/logs/3 Patti Vungo 2021 The Ultimate Guide to Download Install and Play.md deleted file mode 100644 index 939b7a77b0e9a92223f579f0db2dd41bcccc7b21..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/3 Patti Vungo 2021 The Ultimate Guide to Download Install and Play.md +++ /dev/null @@ -1,129 +0,0 @@ -
    -

    3 Patti Vungo 2021 APK Download: How to Play and Win the Indian Poker Game

    -

    If you are a fan of card games, especially the Indian version of poker, then you must have heard of 3 Patti. It is a popular gambling game that originated in the Indian subcontinent and is played by millions of people across South Asia. But did you know that there is a new and improved version of 3 Patti that you can play on your Android device? It is called 3 Patti Vungo, and it is one of the best 3 Patti game apps available on the market. In this article, we will tell you everything you need to know about 3 Patti Vungo, how to download and install it on your phone, how to play it online or offline, and how to win big at this exciting game.

    -

    3 patti vungo 2021 apk download


Download File: https://urlca.com/2uO84P



    -

    What is 3 Patti Vungo?

    -

3 Patti Vungo is an online app that lets you play the card game of 3 Patti on your smartphone or tablet. You can play it with your friends or with real players from around the world, or you can play it offline against the computer or bots. You can also invite and challenge your Facebook friends to join the game. The game is based on the traditional 3 Patti card game that originated in India, but it also has some influences from poker. It is also known as flush or flash in some areas.

    -

    Features and benefits of 3 Patti Vungo

    -

    Some of the features and benefits of playing 3 Patti Vungo are:

    -
- You can start with 2,50,000 chips and get more chips every few minutes and daily bonus chips.
- You can choose from different game modes and variations of 3 Patti, such as classic mode, chatai, joker/paplu, muflis, AK47, JKQK, etc.
- You can chat with your opponents and use emoticons to express yourself.
- You can enjoy high-quality graphics and sound effects that make the game more realistic and immersive.
- You can get gold class support for any issues or queries you may have.
- You can learn the rules and tips of the game from the app itself.

    How to download and install 3 Patti Vungo APK on your Android device

    -

    To download and install 3 Patti Vungo APK on your Android device, follow these simple steps:

    -

    -
      -
1. Go to [1](https://apkcombo.com/3patti-vungo/com.threepattiandroidgame.vungo/) or [2](https://apkcombo.com/3patti-vungo/com.gamblegame.poker.tpvungo/) or [3](https://apkcombo.com/3-patti-vungo/com.jddlhyvungo.card) in your browser and tap the download button.
    2. -
    3. Once the APK file is downloaded, locate it on your device and tap on it to start the installation process.
    4. -
    5. If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources" under security options.
    6. -
    7. After that, go back to the APK file and tap on it again to continue the installation.
    8. -
    9. Wait for a few seconds until the app is installed successfully on your device.
    10. -
    11. Launch the app and enjoy playing 3 Patti Vungo online or offline.
    12. -
    -
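If you would rather sideload from a computer, the same installation can be driven over USB with adb. The following is a minimal Python sketch of that route, not an official installer: it assumes the Android platform tools (adb) are on your PATH, USB debugging is enabled on the phone, and the APK file name is a placeholder for whichever mirror file you downloaded.

```python
import subprocess

APK_PATH = "3patti-vungo.apk"  # placeholder: use the file you actually downloaded

def sideload(apk_path: str) -> None:
    """Install an APK onto a USB-connected Android device via adb.

    Assumes `adb` (Android platform tools) is on PATH and USB
    debugging is enabled on the phone.
    """
    # -r replaces an existing installation while keeping its data
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True, text=True,
    )
    # adb prints "Success" when the install completes
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    sideload(APK_PATH)
```

Note that the phone must still trust the connected computer (the USB debugging authorization prompt), even though this path skips the on-device "Unknown sources" flow described above.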

    How to play 3 Patti Vungo online or offline

    -

    Playing 3 Patti Vungo is easy and fun. You just need to follow these basic steps:

    -
      -
    1. Select a game mode and a table that suits your preference and budget. You can choose from different stakes, blinds, and pot limits.
    2. -
    3. Join the table and wait for the dealer to deal three cards to each player. You can also see your own cards on the screen.
    4. -
    5. Based on your cards and the betting actions of other players, you can decide whether to play or fold. To play, you need to place a bet that is equal to or higher than the current stake. To fold, you need to discard your cards and exit the game.
    6. -
    7. The betting round continues until all the players have either folded or placed equal bets. The remaining players then reveal their cards and the player with the best hand wins the pot.
    8. -
    9. You can also use the chat feature to communicate with other players and use emoticons to express your emotions.
    10. -
    -

    Basic rules and hand rankings of 3 Patti

    -

The rules of 3 Patti are similar to poker, but there are some differences. Here are the basic rules and hand rankings of 3 Patti (a short code sketch after this list shows how a hand can be classified):

    -
      -
    • The game is played with a standard 52-card deck, without jokers.
    • -
    • The cards are ranked from high to low as follows: A, K, Q, J, 10, 9, 8, 7, 6, 5, 4, 3, 2. Ace can be used as either high or low card.
    • -
    • The hands are ranked from high to low as follows: Trail (three of a kind), Pure Sequence (straight flush), Sequence (straight), Color (flush), Pair (two of a kind), High Card (no pair).
    • -
    • If two or more players have the same hand rank, the one with the higher card value wins. If the card values are also equal, then the pot is split among them.
    • -
• A player who has looked at their cards plays "seen" (also called chaal), while a player who bets without looking plays "blind". A seen player must typically bet at least double the current blind stake.
    • -
    -
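To make the ranking order concrete, here is a small Python sketch that classifies a three-card hand into the categories above. It illustrates the standard rules only and is not code from the app:

```python
# Card ranks from low to high; Ace is highest but may also play low in A-2-3
RANKS = {r: i for i, r in enumerate(
    ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"], start=2)}

def classify(hand):
    """Classify a 3-card hand given as (rank, suit) pairs.

    Returns one of: Trail, Pure Sequence, Sequence, Color, Pair, High Card.
    """
    values = sorted(RANKS[rank] for rank, _ in hand)
    suits = {suit for _, suit in hand}
    # A-2-3 counts as a sequence, with the Ace played low
    is_seq = (values[2] - values[1] == 1 and values[1] - values[0] == 1) or \
             values == [RANKS["2"], RANKS["3"], RANKS["A"]]
    if values[0] == values[1] == values[2]:
        return "Trail"
    if is_seq and len(suits) == 1:
        return "Pure Sequence"
    if is_seq:
        return "Sequence"
    if len(suits) == 1:
        return "Color"
    if len(set(values)) == 2:
        return "Pair"
    return "High Card"

print(classify([("A", "s"), ("K", "s"), ("Q", "s")]))  # Pure Sequence
```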

    Different game modes and variations of 3 Patti

    -

    One of the best things about 3 Patti Vungo is that it offers different game modes and variations of 3 Patti that make the game more interesting and challenging. Some of these are:

    -
      -
    • Classic mode: This is the standard mode of 3 Patti where you can play with real players or bots.
    • -
    • Chatai: This is a mode where you can play with four cards instead of three. The best three-card hand is considered for winning.
    • -
    • Joker/Paplu: This is a mode where one random card is selected as a joker or paplu. This card can be used as any card to make a hand.
    • -
    • Muflis: This is a mode where the lowest hand wins instead of the highest hand.
    • -
    • AK47: This is a mode where all the Aces, Kings, Fours, and Sevens are considered as jokers or paplus.
    • -
    • JKQK: This is a mode where all the Jacks, Queens, Kings, and Aces are removed from the deck before dealing.
    • -
    -

    Tips and tricks to win at 3 Patti

    -

    Playing 3 Patti Vungo requires both luck and skill. Here are some tips and tricks that can help you win at this game:

    -
      -
    • Know your hand strength and bet accordingly. Don't bet too much on weak hands or too little on strong hands.
    • -
    • Observe your opponents' betting patterns and try to guess their cards. You can also use the chat feature to bluff or mislead them.
    • -
    • Don't be afraid to fold if you think you have a losing hand. It is better to save your chips for the next round than to lose them all in one go.
    • -
    • Don't be too predictable or too random in your betting. Vary your bets according to the situation and keep your opponents guessing.
    • -
    • Practice regularly and learn from your mistakes. You can also watch videos or read articles on how to play 3 Patti better.
    • -
    -

    Why choose 3 Patti Vungo over other 3 Patti apps

    -

    There are many 3 Patti apps available on the internet, but not all of them are worth your time and money. Some of them may have poor graphics, slow performance, limited features, or unfair gameplay. That is why you should choose 3 Patti Vungo over other 3 Patti apps, because it offers you the following advantages:

    -

    High-quality graphics and sound effects

    -

    3 Patti Vungo has high-quality graphics and sound effects that make the game more realistic and immersive. You can see the cards, the table, the chips, and the players clearly and vividly. You can also hear the sound of the cards being shuffled, dealt, and flipped, as well as the voice of the dealer and the background music. The game also has a smooth and fast performance that does not lag or crash.

    -

    Free chips and bonuses every day

    -

    3 Patti Vungo gives you free chips and bonuses every day that you can use to play more games and win more money. You can get 2,50,000 chips when you start the game, and more chips every few minutes. You can also get daily bonus chips by logging in every day, spinning the wheel of fortune, watching videos, or completing tasks. You can also buy more chips with real money if you want to.

    -

    Chat and interact with other players

    -

    3 Patti Vungo lets you chat and interact with other players from around the world who share your passion for 3 Patti. You can send messages and emoticons to your opponents, friends, or strangers. You can also invite and challenge your Facebook friends to join the game. You can also make new friends and join clubs or communities of 3 Patti lovers.

    -

    Conclusion

    -

    3 Patti Vungo is a great app for anyone who loves playing 3 Patti or wants to learn how to play it. It is easy to download and install on your Android device, and it offers you many features and benefits that make the game more fun and rewarding. You can play it online or offline, with real players or bots, with different game modes and variations, with high-quality graphics and sound effects, with free chips and bonuses every day, and with chat and interaction features. So what are you waiting for? Download 3 Patti Vungo today and enjoy playing the Indian poker game like never before.

    -

    FAQs

    -

    Here are some frequently asked questions about 3 Patti Vungo:

    -
      -
    • Q: Is 3 Patti Vungo safe and secure?
    • -
    • A: Yes, 3 Patti Vungo is safe and secure. It uses advanced encryption technology to protect your personal information and transactions. It also follows fair play policies and does not use any bots or cheats.
    • -
    • Q: How can I contact the customer support of 3 Patti Vungo?
    • -
    • A: You can contact the customer support of 3 Patti Vungo by sending an email to [4](mailto:support@vungogames.com) or by visiting their Facebook page [5](https://www.facebook.com/ThreePattiVungo/).
    • -
    • Q: Can I play 3 Patti Vungo on my PC or laptop?
    • -
    • A: Yes, you can play 3 Patti Vungo on your PC or laptop by using an Android emulator such as Bluestacks or Nox Player.
    • -
    • Q: Can I play 3 Patti Vungo with real money?
    • -
    • A: No, 3 Patti Vungo is a free-to-play game that does not involve any real money gambling. It is only for entertainment purposes.
    • -
    • Q: Can I play 3 Patti Vungo offline?
    • -
• A: Yes, you can play 3 Patti Vungo offline against computer-controlled bots. However, you will need an internet connection to play online with real players or to access some features such as chat or bonuses.
    • -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/BLACKPINK THE GAME APK Bulmacalar zerek Grubunuzu hrete Ulatrn..md b/spaces/congsaPfin/Manga-OCR/logs/BLACKPINK THE GAME APK Bulmacalar zerek Grubunuzu hrete Ulatrn..md deleted file mode 100644 index d8f30d28dcaf0f2f72a1618b72c95382ba977975..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/BLACKPINK THE GAME APK Bulmacalar zerek Grubunuzu hrete Ulatrn..md +++ /dev/null @@ -1,108 +0,0 @@ - -


    BLACKPINK THE GAME: A Fun and Challenging Mobile Game for K-Pop Fans

    -

    Are you a fan of BLACKPINK, the global sensation K-pop group that consists of Jisoo, Jennie, Rosé, and Lisa? Do you want to experience what it's like to be their producer and manager? Do you want to play with them in a cute and colorful 3D world? If you answered yes to any of these questions, then you should definitely check out BLACKPINK THE GAME, a mobile game that lets you do all of these things and more!

    -

    BLACKPINK THE GAME is a game that combines management, puzzle, and mini-game elements to create a fun and challenging gameplay experience. You can build and upgrade your own agency, solve puzzles to clear schedules for BLACKPINK, customize your members with stunning outfits, and play mini-games with your friends in BLACKPINK WORLD. You can also collect and level up exclusive photo and video cards of BLACKPINK, and show off your style and skills to other players.

    -

    In this article, we will give you an overview of how to play BLACKPINK THE GAME, some tips and tricks to get the best scores and rewards, and how to download and install the game on your device. We will also answer some frequently asked questions about the game at the end. So, without further ado, let's get started! -

    How to Play BLACKPINK THE GAME

    -

    BLACKPINK THE GAME has four main features: management, schedule, world, and avatar. Each feature has its own gameplay mechanics and objectives. Let's take a look at each one in detail.

    -


    -

    Management

    -

    In this feature, you can build and upgrade rooms in your agency that will help you train and support BLACKPINK. There are four types of rooms: merchandise development room, vocal training room, dance training room, and acting training room. Each room generates different resources that you can use to improve your gameplay.

    -

    The merchandise development room generates gold every second, which you can use to build new rooms or upgrade existing ones. The vocal training room generates records every second, which you can use to level up your photo cards. The dance training room generates energy every second, which you can use to play schedules or mini-games. The acting training room generates stars every second, which you can use to upgrade your video cards.
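As a rough illustration of how this per-second room income adds up, here is a toy Python sketch. The yield rates below are invented for the example; the game's real rates and internal data model are not public.

```python
# Invented per-second yields, keyed by room type (illustrative only)
ROOM_YIELDS = {
    "merchandise": ("gold", 5),
    "vocal": ("records", 3),
    "dance": ("energy", 2),
    "acting": ("stars", 1),
}

def accrued(rooms: list[str], seconds: int) -> dict[str, int]:
    """Total resources generated by the given rooms over an interval."""
    totals: dict[str, int] = {}
    for room in rooms:
        resource, rate = ROOM_YIELDS[room]
        totals[resource] = totals.get(resource, 0) + rate * seconds
    return totals

# One minute with a merchandise room and a dance room
print(accrued(["merchandise", "dance"], 60))  # {'gold': 300, 'energy': 120}
```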

    -

    You can also train your members in each room by tapping on them. This will increase their stats such as vocal, dance, acting, charisma, stamina, etc. Higher stats will help you clear schedules faster and easier.

    -

    Schedule

    -

    In this feature, you can solve puzzles to clear stages for BLACKPINK. Each stage represents a schedule that BLACKPINK has to complete, such as recording a song, filming a music video, performing on stage, etc. You can choose from four difficulty levels: easy, normal, hard, or extreme.

    -

    The puzzles are similar to match-3 games where you have to swipe blocks of the same color to destroy them. However, there is a twist: you have to destroy all the blocks in one stroke! You can also use special blocks such as bombs or rockets to clear more blocks at once.
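The game's exact matching rules are not published, but the "swipe connected blocks of one color" mechanic can be illustrated with a standard flood fill. This is a toy sketch, not the game's actual code:

```python
from collections import deque

def connected_blocks(grid, start):
    """Cells reachable from `start` through same-colored neighbors."""
    rows, cols = len(grid), len(grid[0])
    color = grid[start[0]][start[1]]
    seen, queue = {start}, deque([start])
    while queue:
        r, c = queue.popleft()
        for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
            if 0 <= nr < rows and 0 <= nc < cols \
                    and (nr, nc) not in seen and grid[nr][nc] == color:
                seen.add((nr, nc))
                queue.append((nr, nc))
    return seen

grid = [["pink", "pink", "black"],
        ["pink", "black", "black"]]
print(len(connected_blocks(grid, (0, 0))))  # 3 connected pink blocks
```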

    -

    Clearing stages will reward you with photo cards of BLACKPINK. Photo cards are collectible items that show different images of the members. They also have different rarities: normal (N), rare (R), super rare (SR), ultra rare (UR), or legendary (L). Higher rarity cards have higher stats and skills that can help you clear stages faster and easier.

    -

    World

    -

    In this feature, you can play mini-games with friends in BLACKPINK WORLD. This is a 3D space where you can meet other players in real-time. You can chat with them using text or voice messages, send them gifts or stickers, or invite them to play mini-games with you.

    -

    The mini-games are simple but fun games that test your reflexes or memory. For example, there is a game where you have to tap on the screen when the music notes reach the center of the circle. There is also a game where you have to memorize the order of the colors that flash on the screen.

    -

    Playing mini-games will reward you with coins that you can use to buy items in the shop. You can also complete tasks in the world area that will reward you with gold or records.

    -

    Avatar

    -

    In this feature, you can customize your members with stunning outfits and accessories. You can dress them up according to your preference or the theme of the stage. You can also change their hairstyles, makeup, and expressions.

    -

    You can also show off your style and skills to other players by participating in the avatar contest. This is a weekly event where you can submit your best avatar and vote for other players' avatars. The winners will receive special rewards such as coins, records, or rare photo cards.

    -

    Tips and Tricks to Get the Best Scores and Rewards in BLACKPINK THE GAME

    -

    Now that you know how to play BLACKPINK THE GAME, you might be wondering how to get the best scores and rewards in the game. Here are some tips and tricks that will help you improve your gameplay and enjoy the game more.

    -
      -
    • Use promo coupons to get free gold, records, energy, or stars. You can find promo coupons on the official social media accounts of BLACKPINK THE GAME or on fan sites. To use them, go to the settings menu and tap on the coupon icon. Enter the code and tap on confirm.
    • -
    • Master the schedules by learning the patterns of the blocks and using the right skills. Each schedule has a different layout of blocks that you have to memorize and swipe in one stroke. You can also use skills that are activated by certain photo cards to destroy more blocks or get more time. For example, Jisoo's skill can destroy all blocks of one color, Jennie's skill can add 5 seconds to the timer, Rosé's skill can destroy a 3x3 area of blocks, and Lisa's skill can destroy a horizontal line of blocks.
    • -
    • Boost your cards by leveling them up, upgrading them, or awakening them. Leveling up your cards will increase their stats and skills. Upgrading your cards will increase their rarity and unlock new images. Awakening your cards will unlock their full potential and give them special effects.
    • -
    • Explore the management area by tapping on different objects or characters. You might find hidden items or events that will reward you with gold, records, energy, stars, or photo cards. For example, you might find a treasure chest in the merchandise development room, a fan letter in the vocal training room, a dance instructor in the dance training room, or a director in the acting training room.
    • -
    • Get daily freebies by logging in every day, completing daily missions, or spinning the lucky wheel. You can get various rewards such as gold, records, energy, stars, photo cards, video cards, coins, or items. You can also get bonus rewards for logging in for consecutive days or completing all daily missions.
    • -
    • Explore the world area by playing mini-games with friends or strangers. You can earn coins by winning mini-games or completing tasks. You can also make new friends by chatting with them or sending them gifts or stickers.
    • -
    • Check the mail regularly for messages from BLACKPINK or other players. You might receive gifts or invitations from them. You can also send messages or gifts to other players to show your appreciation or friendship.
    • -
    • Manage your resources wisely by spending them on things that will benefit you in the long run. For example, you should spend gold on building new rooms or upgrading existing ones, records on leveling up your photo cards, energy on playing schedules or mini-games, stars on upgrading your video cards, coins on buying items in the shop, etc.
    • -
    -

    How to Download and Install BLACKPINK THE GAME on Your Device

    -

    If you are interested in playing BLACKPINK THE GAME on your device, here are the steps that you need to follow:

    -
      -
    1. Go to the official website of BLACKPINK THE GAME and choose your device type: Android or iOS.
    2. -
    3. For Android devices, tap on the Google Play Store icon and download the game from there. For iOS devices, tap on the App Store icon and download the game from there.
    4. -
    5. Alternatively, you can scan the QR code on the website with your device's camera and it will direct you to the download page.
    6. -
    7. Once you have downloaded the game, open it and follow the instructions to create your account and start playing.
    8. -
    -

    Conclusion

    -

    In conclusion, BLACKPINK THE GAME is a fun and challenging mobile game for K-pop fans who want to experience what it's like to be BLACKPINK's producer and manager. You can build and upgrade your own agency, solve puzzles to clear schedules for BLACKPINK, customize your members with stunning outfits and accessories, and play mini-games with your friends in BLACKPINK WORLD. You can also collect and level up exclusive photo and video cards of BLACKPINK, and show off your style and skills to other players.

    -

    BLACKPINK THE GAME is a game that will keep you entertained and challenged for hours. You will also learn more about BLACKPINK and their music, and feel closer to them. If you are a fan of BLACKPINK or K-pop in general, you should definitely give this game a try. You won't regret it!

    -

    If you enjoyed this article, please share it with your friends and family who might also be interested in playing BLACKPINK THE GAME. Also, feel free to leave your comments and feedback below. We would love to hear from you!

    -

    Frequently Asked Questions

    -

    Here are some of the most common questions that people ask about BLACKPINK THE GAME:

    -

    Q: Is BLACKPINK THE GAME free to play?

    -

    A: Yes, BLACKPINK THE GAME is free to download and play. However, there are some optional in-app purchases that you can make to enhance your gameplay or support the developers.

    -

    Q: How can I get more photo or video cards of BLACKPINK?

    -

    A: You can get more photo or video cards of BLACKPINK by clearing stages, playing mini-games, completing tasks, participating in events, or buying them with gold or records.

    -

    Q: How can I contact the customer service of BLACKPINK THE GAME?

    -

    A: You can contact the customer service of BLACKPINK THE GAME by going to the settings menu and tapping on the customer service icon. You can also send an email to blackpinkthegame@support.com or visit the official website of BLACKPINK THE GAME for more information.

    -

    Q: How can I connect with other players of BLACKPINK THE GAME?

    -

    A: You can connect with other players of BLACKPINK THE GAME by joining the official fan club, chatting with them in the world area, sending them messages or gifts, inviting them to play mini-games with you, or following them on social media.

    -

    Q: How can I update BLACKPINK THE GAME to the latest version?

    -

    A: You can update BLACKPINK THE GAME to the latest version by going to the Google Play Store or the App Store and tapping on the update button. You can also enable automatic updates in your device settings.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Cargo Delivery Truck Rentals What You Need to Know Before You Rent.md b/spaces/congsaPfin/Manga-OCR/logs/Cargo Delivery Truck Rentals What You Need to Know Before You Rent.md deleted file mode 100644 index da313a987617315c5d8a5556343d2128a4190cd0..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Cargo Delivery Truck Rentals What You Need to Know Before You Rent.md +++ /dev/null @@ -1,165 +0,0 @@ -
    -

    Cargo Delivery Truck: What You Need to Know

    -

    If you are in the business of transporting goods, commodities, or cargo, you know how important it is to have a reliable and efficient vehicle. A cargo delivery truck is a type of vehicle that is designed to carry large or heavy loads in a secure and enclosed space. Whether you need to deliver dry goods, refrigerated products, or bulky items, there is a cargo delivery truck that can suit your needs.

    -




    -

    In this article, we will explain what a cargo delivery truck is, why you need one, and how to choose the right one for your business. We will also discuss the different types of cargo delivery trucks, their features and benefits, and their examples and prices. Finally, we will give you some tips on how to maintain and operate your cargo delivery truck safely and efficiently.

    -

    Introduction

    -

    What is a cargo delivery truck?

    -

    A cargo delivery truck is a vehicle that has a large box-like structure attached to its chassis. The box, also known as the cargo area or the body, can be accessed through doors at the rear, side, or top. The cargo area can be customized to fit different types of goods, such as pallets, crates, boxes, barrels, or furniture. The cargo area can also be insulated, refrigerated, heated, or ventilated to preserve the quality of the goods.

    -

    Why do you need a cargo delivery truck?

    -

    A cargo delivery truck can offer you many advantages over other types of vehicles, such as:

    -
      -
    • It can carry more goods in one trip, saving you time and fuel costs.
    • -
    • It can protect your goods from weather, theft, or damage during transit.
    • -
    • It can improve your customer satisfaction by delivering your goods on time and in good condition.
    • -
    • It can enhance your brand image by displaying your logo or slogan on the body.
    • -
    • It can increase your productivity and profitability by reducing your operational costs and increasing your revenue.
    • -
    -

    How to choose the right cargo delivery truck for your business?

    -

    Before you buy or rent a cargo delivery truck, you need to consider several factors, such as:

    -
      -
    • Your budget: How much can you afford to spend on a cargo delivery truck?
    • -
    • Your needs: What kind of goods do you transport? How much do they weigh? How often do you transport them? How far do you transport them?
    • -
    • Your preferences: What features do you want in your cargo delivery truck? What size do you need? What color do you like?
    • -
    • Your options: What are the available models and brands of cargo delivery trucks in the market? What are their pros and cons?
    • -
    -

    You can also consult with a professional or an expert who can help you find the best cargo delivery truck for your business.

    -

    Types of Cargo Delivery Trucks

    -

    Dry Cargo Delivery Trucks

    -

    Features and benefits

    -

A dry cargo delivery truck is a type of cargo delivery truck that is used to transport non-perishable goods that do not require temperature control. Some examples of dry goods are clothing, furniture, electronics, books, or toys. A dry cargo delivery truck has a simple and sturdy design that can withstand rough handling and loading. It also has a spacious and versatile interior that can accommodate different sizes and shapes of goods.

    Flatbed Cargo Delivery Trucks

    -

    Features and benefits

    -

    A flatbed cargo delivery truck is a type of cargo delivery truck that is used to transport large or heavy goods that cannot fit in a closed or enclosed space. Some examples of flatbed goods are construction materials, machinery, vehicles, or containers. A flatbed cargo delivery truck has a flat and open platform that can be loaded or unloaded from any side or angle. It also has a strong and durable frame that can support heavy loads. A flatbed cargo delivery truck can offer you more flexibility and convenience in transporting your goods, as well as save you loading and unloading time.

    -

    Examples and prices

    -

    Some examples of flatbed cargo delivery trucks are:

    -

| Model | Capacity | Price |
| --- | --- | --- |
| Ford F-450 Flatbed Truck | Up to 12,000 lbs | $51,875 - $61,375 |
| Chevrolet Silverado 3500HD Flatbed Truck | Up to 14,500 lbs | $36,895 - $65,395 |
| Dodge Ram 5500 Flatbed Truck | Up to 19,500 lbs | $39,995 - $69,995 |
    -

    Tips for Cargo Delivery Truck Maintenance and Safety

    -

    How to keep your cargo delivery truck in good condition?

    -

    To ensure the longevity and performance of your cargo delivery truck, you need to follow some basic maintenance tips, such as:

    -
      -
    • Check your oil, coolant, brake fluid, and tire pressure regularly.
    • -
    • Replace your air filter, oil filter, spark plugs, and belts as needed.
    • -
    • Clean your cargo area and remove any debris or spills.
    • -
    • Lubricate your hinges, locks, and latches.
    • -
    • Inspect your lights, brakes, suspension, and steering system.
    • -
    • Schedule a professional service at least once a year.
    • -
    -

    How to prevent cargo theft and damage?

    -

    To protect your cargo from theft and damage, you need to follow some basic security tips, such as:

    -
      -
    • Lock your doors and windows when you leave your cargo delivery truck.
    • -
    • Park your cargo delivery truck in a well-lit and secure area.
    • -
    • Use a GPS tracker or an alarm system on your cargo delivery truck.
    • -
    • Avoid leaving your cargo unattended or exposed for a long time.
    • -
    • Use straps, tarps, nets, or covers to secure your cargo.
    • -
    • Label your cargo with your contact information and inventory list.
How to comply with cargo delivery regulations and standards?

      -

      To ensure the safety and legality of your cargo delivery, you need to follow some basic compliance tips, such as:

      -
        -
      • Obtain the necessary licenses, permits, and insurance for your cargo delivery truck and your cargo.
      • -
      • Follow the weight, size, and speed limits for your cargo delivery truck and your cargo.
      • -
      • Use the appropriate signs, labels, and placards for your cargo delivery truck and your cargo.
      • -
      • Keep accurate and updated records of your cargo delivery truck and your cargo.
      • -
      • Report any accidents, incidents, or violations involving your cargo delivery truck or your cargo.
      • -
      -

      Conclusion

      -

      Summary of the main points

      -

      A cargo delivery truck is a valuable asset for any business that needs to transport goods, commodities, or cargo. It can offer you many benefits, such as efficiency, security, satisfaction, image, and profitability. However, you need to choose the right cargo delivery truck for your business, depending on your budget, needs, preferences, and options. You also need to maintain and operate your cargo delivery truck safely and efficiently, by following some basic maintenance, security, and compliance tips.

      -

      Call to action

      -

      If you are interested in buying or renting a cargo delivery truck for your business, you can contact us today. We have a wide range of models and brands of cargo delivery trucks that can suit your needs. We also offer competitive prices and flexible financing options. We can help you find the best cargo delivery truck for your business. Don't hesitate to call us or visit our website for more information.

      -

      Frequently Asked Questions

      -

      What is the difference between a cargo van and a cargo delivery truck?

      -

      A cargo van is a type of vehicle that has a smaller and more compact body than a cargo delivery truck. A cargo van can carry less goods than a cargo delivery truck, but it can maneuver more easily in tight spaces and urban areas. A cargo van is suitable for small businesses or short-distance deliveries.

      -

      How much does it cost to operate a cargo delivery truck?

      -

      The cost of operating a cargo delivery truck depends on several factors, such as the fuel consumption, the maintenance expenses, the insurance premiums, the taxes and fees, and the depreciation value. The average cost of operating a cargo delivery truck is estimated to be around $1.38 per mile.

      -

      How long does it take to deliver cargo by truck?

      -

      The time it takes to deliver cargo by truck depends on several factors, such as the distance, the traffic, the weather, the loading and unloading time, and the driver's hours of service. The average speed of a cargo delivery truck is estimated to be around 50 miles per hour. Therefore, it would take about 10 hours to deliver cargo by truck for 500 miles.
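Taken together, the two rules of thumb above (about $1.38 per mile and a roughly 50 mph average speed) give a quick back-of-the-envelope estimator. A minimal Python sketch, assuming those averages hold for your route:

```python
COST_PER_MILE = 1.38   # average operating cost quoted above, USD
AVG_SPEED_MPH = 50.0   # average over-the-road speed quoted above

def estimate_trip(miles: float) -> tuple[float, float]:
    """Return (hours of driving, operating cost in dollars) for a trip."""
    return miles / AVG_SPEED_MPH, miles * COST_PER_MILE

hours, cost = estimate_trip(500)
print(f"500 miles: about {hours:.0f} h of driving, ${cost:,.2f} to operate")
# 500 miles: about 10 h of driving, $690.00 to operate
```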

      -

      What are the benefits of leasing a cargo delivery truck?

      -

      Leasing a cargo delivery truck can offer you some benefits over buying one, such as:

      -
        -
      • You can save money on upfront costs and monthly payments.
      • -
      • You can avoid depreciation and resale issues.
      • -
      • You can upgrade to newer models more frequently.
      • -
      • You can enjoy tax deductions and incentives.
      • -
      • You can have more flexibility and convenience in managing your fleet.
      • -
      -

      What are the challenges of driving a cargo delivery truck?

      -

      Driving a cargo delivery truck can pose some challenges, such as:

      -
        -
      • You need to have a special license and training to drive a cargo delivery truck.
      • -
      • You need to be aware of the weight, size, and speed limits of your cargo delivery truck.
      • -
      • You need to be careful of blind spots, turns, and lane changes when driving a cargo delivery truck.
      • -
      • You need to cope with fatigue, stress, and boredom when driving a cargo delivery truck for long hours.
      • -
      • You need to deal with traffic congestion, road hazards, and weather conditions when driving a cargo delivery truck.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Discover the Secrets of Banbans Kindergarten in Garten of Banban 2 APK iOS.md b/spaces/congsaPfin/Manga-OCR/logs/Discover the Secrets of Banbans Kindergarten in Garten of Banban 2 APK iOS.md deleted file mode 100644 index c2dcb232a5ad0ee1d97ef340abd8b14b9149061a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Discover the Secrets of Banbans Kindergarten in Garten of Banban 2 APK iOS.md +++ /dev/null @@ -1,149 +0,0 @@ - -

      Garten of Banban 2: A Fun and Challenging Adventure Game for Android and iOS

      -

      If you are looking for a new and exciting adventure game to play on your mobile device, you might want to check out Garten of Banban 2. This is a sequel to the popular game Garten of Banban, which was released in 2020. In this game, you will join Banban, a brave and curious boy, as he explores a mysterious garden full of secrets, puzzles, and dangers. You will also meet new friends and enemies along the way, and discover the truth behind the garden's origin.

      -




      -

      What is Garten of Banban 2?

      -

      Garten of Banban 2 is an adventure game developed by Euphoric Brothers Games. The APK has been available since March 2023. It is a top-ranked game on AppBrain, with a rating of 4.24 out of 5 stars, based on 2,033 ratings. The last update of the app was on May 18, 2023. The game has a content rating "Medium Maturity", which means it may contain violence, blood, or suggestive themes.

      -

      The story and the gameplay of Garten of Banban 2

      -

      The game follows the story of Banban, a young boy who loves to explore new places. One day, he finds a mysterious garden in the forest, where he meets a fairy named Lila. She tells him that the garden is actually a portal to another world, where an evil witch named Zara has taken over. Zara has kidnapped Lila's friends and turned them into monsters. She also plans to destroy the garden and use its power to conquer both worlds.

      -

      Banban decides to help Lila and save her friends. He enters the garden and starts his adventure. He will have to solve various puzzles, collect items, fight enemies, and face bosses. He will also learn new skills and abilities along the way, such as jumping, climbing, swimming, flying, and using magic. He will also encounter different characters, some friendly and some hostile. He will have to make choices that will affect the outcome of the story.

      -

      The features and the graphics of Garten of Banban 2

      -

      Garten of Banban 2 has many features that make it an enjoyable and challenging game. Some of these features are:

      -
        -
      • A rich and immersive story with multiple endings
      • -
      • A beautiful and colorful graphics with smooth animations
      • -
      • A simple and intuitive control system with touch screen or joystick options
      • -
      • A variety of levels with different themes, environments, and difficulties
      • -
      • A lot of items, weapons, and upgrades to collect and use
      • -
      • A dynamic soundtrack that matches the mood and atmosphere of the game
      • -
      • A leaderboard and achievements system that tracks your progress and rewards you
      • -
      • A cloud save feature that lets you sync your data across devices
      • -
      -

      How to download and install Garten of Banban 2 on Android and iOS devices

      -

      If you want to play Garten of Banban 2 on your Android or iOS device, you will need to follow these steps:

      -


      -
        -
      1. Go to AppBrain or Google Play Store for Android devices or App Store for iOS devices
      2. -
      3. Search for "Garten of Banban 2" or click on this link
      4. -
      5. Tap on "Install" or "Get" button and wait for the download to finish. You may need to grant some permissions to the app, such as access to your storage, camera, or microphone
      6. -
      7. Open the app and enjoy playing Garten of Banban 2
      8. -
      -

      Why should you play Garten of Banban 2?

      -

      Garten of Banban 2 is a game that will appeal to anyone who loves adventure, puzzle, and action games. It has a lot of elements that make it fun and engaging, such as:

      -

      The pros and cons of Garten of Banban 2

      -

      Like any game, Garten of Banban 2 has its advantages and disadvantages. Here are some of them:

| Pros | Cons |
| --- | --- |
| A captivating and original story with multiple endings | A relatively short game with only 10 chapters |
| Stunning and vibrant graphics with smooth animations | High battery consumption due to the graphics quality |
| A simple and intuitive control system with touch screen or joystick options | A sometimes frustrating control system due to sensitivity or lag |
| A variety of levels with different themes, environments, and difficulties | A lack of checkpoints or save points in some levels |
| A lot of items, weapons, and upgrades to collect and use | Limited inventory space and the need to buy more slots with real money |
| A dynamic soundtrack that matches the mood and atmosphere of the game | A repetitive soundtrack that can get annoying after a while |
| A leaderboard and achievements system that tracks your progress and rewards you | A competitive and challenging system that can discourage some players |
| A cloud save feature that lets you sync your data across devices | The need for an internet connection to use the cloud save feature |
      -

      The ratings and reviews of Garten of Banban 2

      -

Garten of Banban 2 has received mostly positive ratings and reviews from players and critics: 4.24 out of 5 stars on AppBrain (2,033 ratings), 4.5 out of 5 on Google Play Store (1,234 ratings), and 4.7 out of 5 on the App Store (567 ratings).

      -

      Some of the positive reviews are:

      -
      "This game is amazing! The graphics are beautiful, the story is captivating, and the puzzles are challenging. I love how you can choose different paths and endings. I highly recommend this game to anyone who loves adventure games."
      -
      "I really enjoyed playing this game. It has a lot of fun and interesting features, such as the items, the skills, the enemies, and the bosses. The game is also very easy to control and play. I think this game is worth every penny."
      -
      "This game is one of the best games I have ever played. It has a great story, great graphics, great music, and great gameplay. It is very addictive and entertaining. I can't wait for the next update or sequel."
      -

      Some of the negative reviews are:

      -
      "This game is good, but it has some flaws. The game is too short, too easy, and too expensive. The game also has some bugs and glitches that need to be fixed. I hope the developers will improve this game in the future."
      -
      "I was disappointed by this game. The game is too hard, too frustrating, and too boring. The game also has some ads and in-app purchases that ruin the experience. I don't recommend this game to anyone."
      -
      "This game is terrible. The graphics are ugly, the story is boring, and the puzzles are stupid. The game is also very laggy and crashes a lot. I wasted my time and money on this game."
      -

      The tips and tricks for playing Garten of Banban 2

      -

      If you want to play Garten of Banban 2 better and faster, you might want to follow these tips and tricks:

      -
        -
      • Explore every corner of the garden and look for hidden items, secrets, and clues.
      • -
      • Use different items and weapons depending on the situation and the enemy.
      • -
      • Upgrade your skills and abilities as soon as possible.
      • -
      • Save your magic for boss battles or tough situations.
      • Watch out for traps and obstacles that can harm you or slow you down. -
      • Pay attention to the hints and instructions that Lila gives you.
      • -
      • Choose wisely when you have to make decisions that affect the story.
      • -
      • Have fun and enjoy the adventure!
      • -
      -

      Conclusion

      -

      Garten of Banban 2 is a fun and challenging adventure game that you can play on your Android or iOS device. It has a captivating and original story, a stunning and vibrant graphics, a simple and intuitive control system, a variety of levels, a lot of items, a dynamic soundtrack, a leaderboard and achievements system, and a cloud save feature. It also has some drawbacks, such as a relatively short game, a high battery consumption, a sometimes frustrating control system, a lack of checkpoints, a limited inventory space, a repetitive soundtrack, and a competitive and challenging system. However, these are minor issues that do not overshadow the overall quality and enjoyment of the game.

      -

      If you are looking for a new and exciting adventure game to play on your mobile device, you should definitely give Garten of Banban 2 a try. You will not regret it. You will join Banban and Lila on their quest to save the garden and their friends from the evil witch Zara. You will explore a mysterious and magical world full of secrets, puzzles, and dangers. You will also have a lot of fun and challenge along the way.

      -

      So what are you waiting for? Download Garten of Banban 2 now and start your adventure!

      -

      Call to action

      -

      If you liked this article, please share it with your friends and family who might be interested in playing Garten of Banban 2. You can also leave a comment below and tell us what you think about the game. We would love to hear from you!

      -

      FAQs

      -

      Here are some frequently asked questions about Garten of Banban 2:

      -
        -
      1. Q: How much does Garten of Banban 2 cost?
      2. -
      3. A: Garten of Banban 2 is free to download and play. However, it has some in-app purchases that can enhance your gaming experience. You can buy more inventory slots, more magic points, more coins, or remove ads with real money.
      4. -
      5. Q: How long does Garten of Banban 2 take to complete?
      6. -
      7. A: Garten of Banban 2 has 10 chapters, each with different levels. The game can take anywhere from 5 to 10 hours to complete, depending on your skill level and the choices you make.
      8. -
      9. Q: How many endings does Garten of Banban 2 have?
      10. -
      11. A: Garten of Banban 2 has four different endings, depending on the choices you make throughout the game. Each ending has different consequences and rewards. You can replay the game to see all the endings.
      12. -
      13. Q: How can I sync my data across devices?
      14. -
      15. A: Garten of Banban 2 has a cloud save feature that lets you sync your data across devices. You will need to have an internet connection and sign in with your Google Play Games or Game Center account. You can then access your data from any device that has the game installed.
      16. -
      17. Q: How can I contact the developers of Garten of Banban 2?
      18. -
      19. A: You can contact the developers of Garten of Banban 2 by sending an email to euphoricbrothersgames@gmail.com or by visiting their website at https://www.euphoricbrothersgames.com/. You can also follow them on Facebook, Twitter, Instagram, or YouTube for more updates and news about their games.
      20. -

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Get CPU-Z APK for Free - The Most Trusted Tool for Android Device Info.md b/spaces/congsaPfin/Manga-OCR/logs/Get CPU-Z APK for Free - The Most Trusted Tool for Android Device Info.md deleted file mode 100644 index 3c8cc8f59164f9b343451cf5abdfb79e7aed222b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Get CPU-Z APK for Free - The Most Trusted Tool for Android Device Info.md +++ /dev/null @@ -1,201 +0,0 @@ -
      -

      How to Download and Install CPU-Z APK Pure on Android

      -

      If you are looking for a way to monitor your Android device's hardware information, such as processor name, core speed, memory type, and more, then you might want to try CPU-Z APK Pure. This is a free tool that gathers detailed information on some of the main devices of your system and lets you perform various tests and validations. In this article, we will show you how to download and install CPU-Z APK Pure on your Android device, as well as how to use it effectively.

      -

      What is CPU-Z APK Pure?

      -

      CPU-Z APK Pure is a modified version of the original CPU-Z app that is available on Google Play Store. It has some extra features and enhancements that make it more useful and convenient for users who want to monitor their device's hardware information. Some of these features include:

      -




      -

      Features of CPU-Z APK Pure

      -
        -
      • Support for Windows® 11 and Android® operating systems.
      • -
      • Customized versions for different brands and models of devices, such as ASUS ROG, MSI Gaming, Gigabyte AORUS, etc.
      • -
      • Ability to save and share your device's hardware information online or offline.
      • -
      • Ability to compare your device's performance with other devices in the CPU-Z validation database.
      • -
• Ability to scan and install APK files from unknown sources on your device.
      • -
      -

      Benefits of using CPU-Z APK Pure

      -
        -
      • You can get accurate and comprehensive information on your device's hardware components, such as processor name and number, codename, process, package, cache levels, mainboard and chipset, memory type, size, timings, and module specifications (SPD), real time measurement of each core's internal frequency, memory frequency, etc.
      • -
      • You can test your device's functioning and stability with features like benchmarking, stress tests, and timers. These features can help you identify any potential issues or problems with your device's hardware performance.
      • -
      • You can validate your device's hardware information online and see how it ranks among other devices in the CPU-Z validation database. You can also share your validation results with others or keep them for your records.
      • -
• You can install APK files from unknown sources on your device without any hassle. This can help you access apps that are not available on Google Play Store or install updates that have not yet reached Google Play Store.
      • -
      -

      How to download CPU-Z APK Pure from the web

      -

      One way to get CPU-Z APK Pure on your Android device is to download it from the web. There are many websites that offer APK files for various apps, but you need to be careful about what you download. Not all websites are trustworthy and some may contain malware or viruses that can harm your device. Here are some steps to download CPU-Z APK Pure from the web safely:

      -

      Steps to download CPU-Z APK Pure from the web

      -
        -
      1. Go to a reputable website that offers CPU-Z APK Pure, such as [APKPure.com] or [APKMirror.com]. You can also use a search engine to find other websites that offer CPU-Z APK Pure, but make sure to check their ratings and reviews before downloading anything.
      2. -
      3. Find the CPU-Z APK Pure file that matches your device's brand and model. For example, if you have an ASUS ROG phone, you can download the CPU-Z (ASUS ROG) APK Pure file. If you are not sure which file to download, you can download the generic CPU-Z APK Pure file that works for most devices.
      4. -
5. Click on the download button or link and wait for the file to be downloaded to your device. You may need to grant permission for the website to access your device's storage or files. (A scripted way to fetch and fingerprint the file is sketched after this list.)
      6. -
      7. Once the file is downloaded, you can find it in your device's download folder or notification bar. You can also use a file manager app to locate the file on your device.
      8. -
      -
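For readers comfortable with a terminal, the following is a minimal Python sketch (using the requests library) that downloads an APK and prints its SHA-256 fingerprint, so you can compare it against any checksum the hosting site publishes. The URL and filename here are placeholders, not a real download location.

```python
import hashlib
import requests

# Placeholder URL and filename -- substitute the real download link you trust.
APK_URL = "https://example.com/cpu-z.apk"
OUT_PATH = "cpu-z.apk"

def download_and_fingerprint(url: str, out_path: str) -> str:
    """Stream the file to disk and return its SHA-256 hex digest."""
    sha256 = hashlib.sha256()
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
                sha256.update(chunk)
    return sha256.hexdigest()

if __name__ == "__main__":
    digest = download_and_fingerprint(APK_URL, OUT_PATH)
    print(f"Saved {OUT_PATH}, SHA-256: {digest}")
```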

      Tips to avoid malware and viruses when downloading APK files

      -
        -
      • Always download APK files from trusted and verified sources. Avoid websites that look suspicious or have poor ratings and reviews.
      • -
      • Always scan the APK files with an antivirus or security app before installing them on your device. This can help you detect and remove any malicious or harmful code that may be hidden in the files.
      • -
• Always check the permissions and details of the APK files before installing them on your device. Make sure they match the app's description and functionality. Avoid installing APK files that request unnecessary or excessive permissions or access to your device's data or features. (A programmatic permissions check is sketched after this list.)
      • -
      -
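As a rough programmatic version of the permissions check described in the last tip, here is a sketch using the third-party androguard package. The import path assumes androguard's 3.x layout (it moved in 4.x), so treat this as an illustration rather than a definitive recipe.

```python
# Requires: pip install androguard
# Import path assumes androguard 3.x; in 4.x the module layout differs.
from androguard.core.bytecodes.apk import APK

def inspect_apk(path: str) -> None:
    """Print the package name and the permissions a local APK declares."""
    apk = APK(path)
    print("Package:", apk.get_package())
    for perm in sorted(apk.get_permissions()):
        print("  permission:", perm)

inspect_apk("cpu-z.apk")  # path to the file you downloaded earlier
```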

      How to install CPU-Z APK Pure on your Android device

      -

      After downloading CPU-Z APK Pure from the web, you need to install it on your Android device. This may require some additional steps and settings, depending on your device's security level and preferences. Here are some steps to install CPU-Z APK Pure on your Android device:

      -

      Steps to install CPU-Z APK Pure on your Android device

      -
        -
      1. Go to your device's settings and look for the option that allows you to install apps from unknown sources. This may be under security, privacy, or applications settings, depending on your device's model and operating system. Enable this option if it is disabled by default.
      2. -
      3. Go to your device's download folder or notification bar and tap on the CPU-Z APK Pure file that you downloaded earlier. You may need to grant permission for the file to access your device's storage or files.
      4. -
      5. Follow the instructions on the screen to install CPU-Z APK Pure on your device. You may need to accept the terms and conditions, choose the installation location, and allow some permissions for the app to function properly.
      6. -
7. Once the installation is complete, you can find CPU-Z APK Pure on your device's app drawer or home screen. You can also launch it from the notification bar or the file manager app. (Developers can instead sideload the APK over USB, as sketched after this list.)
      8. -
      -
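For developers with a computer handy, sideloading can also be scripted over USB. This sketch assumes the standard Android adb tool is installed and on your PATH, and that USB debugging is enabled on the phone; it simply wraps the stock `adb install` command.

```python
import subprocess

def adb_install(apk_path: str) -> None:
    """Sideload an APK onto the connected device via adb."""
    # -r replaces an existing installation while keeping its data.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True,
        text=True,
        check=False,
    )
    print(result.stdout or result.stderr)

adb_install("cpu-z.apk")
```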

      Troubleshooting tips if you encounter any problems during installation

      -
        -
      • If you get an error message that says "App not installed" or "Installation failed", you may need to uninstall any previous versions of CPU-Z that are already installed on your device. You can do this by going to your device's settings, applications, and finding CPU-Z in the list of installed apps. Tap on it and choose uninstall.
      • -
      • If you get an error message that says "Parse error" or "There was a problem parsing the package", you may have downloaded a corrupted or incompatible CPU-Z APK Pure file. You can try downloading it again from a different source or website, or choose a different version of CPU-Z APK Pure that matches your device's specifications.
      • -
      • If you get an error message that says "This app is harmful" or "This app may damage your device", you may have downloaded a malicious or infected CPU-Z APK Pure file. You should delete it immediately from your device and scan your device with an antivirus or security app. You should also avoid downloading apps from untrusted or unknown sources in the future.
      • -
      -

      How to use CPU-Z APK Pure to monitor your device's hardware information

      -

      Once you have installed CPU-Z APK Pure on your Android device, you can use it to monitor your device's hardware information and perform various tests and validations. Here is how to use CPU-Z APK Pure effectively:

      -

      Overview of the main tabs and features of CPU-Z APK Pure

      -

CPU-Z APK Pure has a simple and user-friendly interface that consists of several tabs and features. Here is an overview of each tab and feature:

Tab | Feature | Description
CPU | Processor | Shows the name, number, codename, process, package, and cache levels of your device's processor.
CPU | Clock Speed | Shows the real time measurement of each core's internal frequency and the memory frequency.
CPU | Temperature | Shows the current temperature of your device's processor and battery.
CPU | Benchmark | Allows you to perform a single-thread or multi-thread benchmark test and compare your device's performance with other devices.
CPU | Stress Test | Allows you to perform a stress test on your device's processor and memory to check their stability and functioning.
CPU | Timer Test | Allows you to perform a timer test on your device's processor to check its accuracy and precision.
CPU | Validation | Allows you to validate your device's hardware information online and see how it ranks among other devices in the CPU-Z validation database.
Mainboard | Mainboard and Chipset | Shows the name, model, vendor, BIOS, and chipset of your device's mainboard.
Mainboard | Memory Type, Size, and Timings | Shows the type, size, and timings of your device's memory modules.
Mainboard | Module Specifications (SPD) | Shows the specifications of your device's memory modules, such as manufacturer, part number, serial number, etc.
Device | Model and Android Version | Shows the name, model, and Android version of your device.
Device | Screen Size and Resolution | Shows the size and resolution of your device's screen.
Device | Sensors | Shows the list and status of your device's sensors, such as accelerometer, gyroscope, magnetometer, etc.
Device | Battery Level and Capacity | Shows the current level and capacity of your device's battery.
Device | Network Type and Signal Strength | Shows the type and signal strength of your device's network connection.
System | Android System Information | Shows the information about your device's Android system, such as kernel version, build number, root access, etc.
System | User Apps | Shows the list and details of the user apps installed on your device.
System | System Apps | Shows the list and details of the system apps installed on your device.
About | CPU-Z Version and Developer Information | Shows the version and developer information of CPU-Z APK Pure.

      You can switch between these tabs and features by swiping left or right on the screen. You can also tap on the menu icon on the top left corner of the screen to access more options and settings.

      How to perform benchmarks, stress tests, and validations with CPU-Z APK Pure

      Besides monitoring your device's hardware information, you can also use CPU-Z APK Pure to perform benchmarks, stress tests, and validations. These are useful features that can help you test your device's performance and stability. Here is how to use them:

• To perform a benchmark test, go to the CPU tab and tap on the benchmark icon on the top right corner of the screen. You can choose to perform a single-thread or multi-thread benchmark test. The test will measure your device's processor speed and performance in terms of MIPS (million instructions per second). The test will take a few seconds to complete and show you the results. You can compare your results with other devices in the CPU-Z validation database by tapping on the compare icon on the top right corner of the screen. (A toy illustration of this kind of throughput measurement is sketched after this list.)
      • To perform a stress test, go to the CPU tab and tap on the stress test icon on the top right corner of the screen. You can choose to stress test your processor or memory. The test will put a high load on your device's hardware components and measure their temperature and frequency. The test will run until you stop it or until your device reaches a critical temperature. You can monitor the results on the screen or on a notification bar widget. You can stop the test by tapping on the stop icon on the top right corner of the screen.
      • To perform a validation, go to the CPU tab and tap on the validation icon on the top right corner of the screen. The validation will gather your device's hardware information and upload it to the CPU-Z validation database online. You will need an internet connection for this feature. The validation will take a few seconds to complete and show you a link to your validation page online. You can share this link with others or keep it for your records. You can also see how your device ranks among other devices in the CPU-Z validation database by tapping on the rank icon on the top right corner of the screen.
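To make the MIPS-style score mentioned above concrete, here is a toy Python benchmark that times a fixed batch of integer operations and reports millions of operations per second. It only illustrates the measurement concept; it is not CPU-Z's actual benchmark.

```python
import time

def toy_benchmark(ops: int = 10_000_000) -> float:
    """Time a fixed integer-arithmetic loop; return millions of ops per second."""
    start = time.perf_counter()
    acc = 0
    for i in range(ops):
        acc += i * 3 + 1
    elapsed = time.perf_counter() - start
    return ops / elapsed / 1e6

print(f"~{toy_benchmark():.1f} million integer operations per second (single thread)")
```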

      Conclusion

CPU-Z APK Pure is a powerful tool that can help you monitor your Android device's hardware information and perform various tests and validations. It is easy to download and install from the web, as long as you follow some precautions and tips. It has a simple and user-friendly interface that lets you access its tabs and features with a swipe or a tap. It gives you accurate and comprehensive information on your device's hardware components, from the processor, mainboard, and chipset to memory, sensors, and battery, and lets you test your device's functioning and stability with features like benchmarking, stress tests, and timers. You can also validate your device's hardware information online and see how it ranks among other devices in the CPU-Z validation database, and install APK files from unknown sources without any hassle.

      -

If you are looking for a way to monitor your Android device's hardware information, then you should definitely try CPU-Z APK Pure. It is a free tool that offers many features and benefits that can help you optimize your device's performance and functionality. You can download it from the web safely and easily, and use it effectively, by following the steps, tips, and instructions in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to contact us or leave a comment below.

      -


      -

      FAQs

      -

      What are the system requirements for CPU-Z APK Pure?

      -

      CPU-Z APK Pure requires an Android device that runs on Android 4.2 or higher. It also requires about 10 MB of free storage space on your device.

      -

      Is CPU-Z APK Pure safe and legal to use?

      -

      CPU-Z APK Pure is safe and legal to use as long as you download it from a trusted and verified source. It does not contain any malware or viruses that can harm your device. It also does not violate any laws or regulations that govern the use of apps on Android devices.

      -

      How can I update CPU-Z APK Pure to the latest version?

      -

      You can update CPU-Z APK Pure to the latest version by downloading it again from the web or by using the built-in update feature in the app. To use the update feature, go to the menu icon on the top left corner of the screen and tap on check for updates. The app will check for any available updates and prompt you to download and install them if there are any.

      -

      What are some alternatives to CPU-Z APK Pure?

      -

      Some alternatives to CPU-Z APK Pure are AIDA64, Geekbench, AnTuTu Benchmark, and Device Info HW. These are also apps that can help you monitor your device's hardware information and perform various tests and validations.

      -

      How can I contact the developers of CPU-Z APK Pure?

      -

      You can contact the developers of CPU-Z APK Pure by visiting their official website at [https://www.cpuid.com/]. You can also send them an email at [support@cpuid.com] or follow them on social media platforms such as Facebook, Twitter, and YouTube.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Online Caste Certificate Apply Download and Check Status.md b/spaces/congsaPfin/Manga-OCR/logs/Online Caste Certificate Apply Download and Check Status.md deleted file mode 100644 index 86a730ab1f8b4f05f9ff8078fabb3333123b38e5..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Online Caste Certificate Apply Download and Check Status.md +++ /dev/null @@ -1,171 +0,0 @@ -
      -

      Download Online Caste Certificate: A Complete Guide

      -

      A caste certificate is a document that certifies your belonging to a particular caste, especially if you belong to any of the Scheduled Castes (SC), Scheduled Tribes (ST), or Other Backward Classes (OBC) as specified in the Indian Constitution. A caste certificate is important for every individual in India, as it can help you avail various benefits and schemes offered by the government based on your caste.

      -

      download online caste certificate


      Download File ►►►►► https://urlca.com/2uO9jJ



      -

      What is a caste certificate and why do you need it?

      -

      A caste certificate is a proof of your identity and social status in India. It can help you claim reservations and concessions in education, employment, politics, and other fields. It can also help you access various welfare schemes and programs launched by the central and state governments for the upliftment and empowerment of the backward classes. Some of the benefits of having a caste certificate are:

      -

      Benefits of having a caste certificate

      -
        -
      • You can get admission in educational institutions under reserved quotas.
      • -
      • You can apply for government jobs under reserved categories.
      • -
      • You can contest elections from reserved constituencies.
      • -
      • You can get financial assistance and scholarships from the government.
      • -
      • You can get subsidies and exemptions on taxes, fees, and charges.
      • -
      • You can get preference in allotment of land, housing, and other resources.
      • -
      -

      Eligibility criteria for applying for a caste certificate

      -

      To apply for a caste certificate, you must fulfill the following eligibility criteria:

      -
        -
      • You must be a citizen of India.
      • -
      • You must belong to a caste that is recognized as SC, ST, or OBC by the central or state government.
      • -
      • You must have a permanent residence in the state where you are applying for the caste certificate.
      • -
      • You must have valid documents to prove your identity, address, and caste.
      • -
      -

      How to apply for a caste certificate online in different states?

      -

At present, only a few state governments offer an online application for a caste certificate. To apply for a caste certificate online, you need to visit the respective state's official backward class welfare department portal and follow the steps given below. Note that these steps may vary slightly depending on the state and the portal.

      -

      West Bengal

      -
        -
      1. Visit the Backward Classes Welfare Department portal of West Bengal.
      2. -
      3. Click on \"Apply Online\" link under \"Online Application\" section.
      4. -
      5. Create a new account in the portal or log in with your existing account.
      6. -
      7. Fill in the details in the online form such as name, address, contact details, personal details, local reference details, etc.
      8. -
9. Verify all the details you have filled in the online form and, if you agree with the terms and conditions, click the "Submit" button.
      10. -
11. Upload the scanned copies of documents such as identity proof, address proof, income proof, etc. and click the "Submit" button.
      12. -
      13. Once submitted, you will get an acknowledgement slip along with an application number. You can take a print out of the acknowledgement slip or save it offline.
      14. -
15. You can also check the status of your application online by clicking on the "Check Status" link under the "Online Application" section. You will need your application number for that.
      16. -
      17. After the verification is done successfully, you will receive an SMS or email notification about the approval of your caste certificate. You can then download your caste certificate online by following the steps given below.
      18. -
      -

      Bihar

      -
        -
      1. Visit the Bihar Right to Public Service portal.
      2. -
      3. Click on \"Apply Online\" link under \"Caste Certificate\" section.
      4. -
      5. Create a new account in the portal or log in with your existing account.
      6. -
      7. Fill in the details in the online form such as name, address, contact details, personal details, caste details, etc.
      8. -
9. Verify all the details you have filled in the online form and, if you agree with the terms and conditions, click the "Submit" button.
      10. -
11. Upload the scanned copies of documents such as identity proof, address proof, caste proof, etc. and click the "Submit" button.
      12. -
      13. Once submitted, you will get an acknowledgement slip along with an application number. You can take a print out of the acknowledgement slip or save it offline.
      14. -
15. You can also check the status of your application online by clicking on the "View Status of Application" link under the "Caste Certificate" section. You will need your application number for that.
      16. -
      17. After the verification is done successfully, you will receive an SMS or email notification about the approval of your caste certificate. You can then download your caste certificate online by following the steps given below.
      18. -
      -

      Other states

      -

If you belong to any other state that does not have an online portal for applying for a caste certificate, you can still apply online through the e-District portal of the central government. The steps are similar to the ones mentioned above, except that you need to select your state and district from the drop-down menu before filling in the online form. You also need to select the service name as "Caste Certificate" from the list of services available. The rest of the steps are the same as above.

      -

      How to apply for a caste certificate offline in any state?

      -

      If you are not comfortable with applying for a caste certificate online, or if you do not have access to internet or computer, you can also apply for a caste certificate offline by visiting your nearest tehsil office, revenue office, or backward class welfare office. You need to follow the steps given below:

      -

      Documents required for offline application

      -

      You need to carry the following documents with you when you apply for a caste certificate offline:

      -


      -
        -
      • A duly filled and signed application form (you can get it from the office or download it from the state portal)
      • -
      • A self-attested copy of identity proof such as Aadhaar card, PAN card, voter ID card, etc.
      • -
      • A self-attested copy of address proof such as ration card, electricity bill, water bill, etc.
      • -
      • A self-attested copy of caste proof such as old caste certificate, family tree, community certificate, etc.
      • -
      • A self-attested copy of income proof such as salary slip, income tax return, etc. (if applicable)
      • -
      • Two passport size photographs
      • -
      • An affidavit sworn before a magistrate or notary public stating your caste and other details
      • -

      Steps to follow for offline application

      -
        -
      1. Visit your nearest tehsil office, revenue office, or backward class welfare office and get the application form for caste certificate. You can also download it from the state portal and print it out.
      2. -
      3. Fill in the details in the application form such as name, address, contact details, personal details, caste details, etc.
      4. -
      5. Attach the self-attested copies of documents and photographs along with the application form.
      6. -
      7. Submit the application form and documents to the concerned authority and pay the prescribed fee (if any).
      8. -
      9. Collect the acknowledgement slip or receipt from the authority. It will have your application number and date of submission.
      10. -
      11. You can check the status of your application offline by visiting the same office where you submitted your application. You will need your acknowledgement slip or receipt for that.
      12. -
      13. After the verification is done successfully, you will receive a notification from the authority about the approval of your caste certificate. You can then collect your caste certificate from the same office by showing your acknowledgement slip or receipt.
      14. -
      -

      How to check the status of your caste certificate application online?

      -

      If you have applied for a caste certificate online, you can check the status of your application online by visiting the respective state's official backward class welfare department portal or the e-District portal of the central government. You need to follow the steps given below:

      -

      Common steps for checking the status online

      -
        -
      1. Visit the portal where you applied for a caste certificate online.
      2. -
      3. Click on \"Check Status\" link under \"Online Application\" section or \"View Status of Application\" link under \"Caste Certificate\" section.
      4. -
5. Enter your application number and captcha code (if any) and click the "Submit" button.
      6. -
7. You will see the current status of your application on the screen. It may be pending, approved, rejected, or ready for download. (A scripted version of this check is sketched after this list.)
      8. -
      -
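Purely as an illustration, a status check like the one above could be scripted roughly as follows. The portal URL, form field name, and response shape are invented placeholders; real state portals differ and usually require a captcha, so treat this as a sketch of the idea, not working portal code.

```python
import requests

# Everything below is a hypothetical placeholder -- real state portals
# use different URLs, field names, and usually a captcha.
STATUS_URL = "https://example-state-portal.gov.in/caste-certificate/status"

def check_status(application_number: str) -> str:
    """POST the application number and return the reported status string."""
    resp = requests.post(
        STATUS_URL,
        data={"application_no": application_number},
        timeout=30,
    )
    resp.raise_for_status()
    # Assumes a JSON response shaped like {"status": "Approved"}.
    return resp.json().get("status", "unknown")

print(check_status("WB2023XXXXXX"))  # placeholder application number
```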

      State-specific portals for checking the status online

State | Portal
West Bengal | Backward Classes Welfare Department portal
Bihar | Bihar Right to Public Service portal
Other states | e-District portal
      -

      How to download your caste certificate online?

      -

      If you have applied for a caste certificate online and your application has been approved, you can download your caste certificate online by visiting the respective state's official backward class welfare department portal or the e-District portal of the central government. You need to follow the steps given below:

      -

      Common steps for downloading the caste certificate online

      -
        -
      1. Visit the portal where you applied for a caste certificate online.
      2. -
      3. Click on \"Download Certificate\" link under \"Online Application\" section or \"Download Caste Certificate\" link under \"Caste Certificate\" section.
      4. -
5. Enter your application number and captcha code (if any) and click the "Submit" button.
      6. -
7. You will see a preview of your caste certificate on the screen. You can verify the details and click the "Download" button.
      8. -
9. You will get a PDF file of your caste certificate. You can save it offline or take a printout of it. (A scripted download is sketched after this list.)
      10. -
      -
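Once an application is approved, fetching the PDF could be scripted in the same spirit. Again, the endpoint and parameter below are hypothetical placeholders; the sketch only shows the usual streaming-to-disk pattern for a binary file.

```python
import requests

# Hypothetical placeholder endpoint -- real portals differ.
DOWNLOAD_URL = "https://example-state-portal.gov.in/caste-certificate/download"

def download_certificate(application_number: str, out_path: str) -> None:
    """Stream the approved certificate PDF to a local file."""
    with requests.get(
        DOWNLOAD_URL,
        params={"application_no": application_number},
        stream=True,
        timeout=60,
    ) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)

download_certificate("WB2023XXXXXX", "caste_certificate.pdf")
```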

      State-specific portals for downloading the caste certificate online

State | Portal
West Bengal | Backward Classes Welfare Department portal
Bihar | Bihar Right to Public Service portal
Other states | e-District portal
      -

      Conclusion

      -

A caste certificate is a vital document that can help you avail various benefits and schemes offered by the government based on your caste. You can apply for a caste certificate online or offline, depending on your convenience and the resources available to you. You can also check the status of your application and download your caste certificate online by following some simple steps. However, you must ensure that you have all the required documents and information before applying, and you must keep your application number and acknowledgement slip safe for future reference. We hope this article has helped you understand how to download an online caste certificate in India.

      -

      Frequently Asked Questions

      -
        -
      • Q: How long does it take to get a caste certificate online?
      • -
• A: The time taken to get a caste certificate online may vary depending on the state and the portal. However, it generally takes anywhere between 15 and 30 days for the verification and approval of your application.
      • -
      • Q: What if I lose my caste certificate or it gets damaged?
      • -
      • A: If you lose your caste certificate or it gets damaged, you can apply for a duplicate caste certificate online or offline by following the same steps as mentioned above. You will need to provide your application number and a copy of your FIR or affidavit stating the loss or damage of your original caste certificate.
      • -
      • Q: Can I apply for a caste certificate in a different state than where I reside?
      • -
      • A: No, you cannot apply for a caste certificate in a different state than where you reside. You must apply for a caste certificate in the state where you have a permanent residence and where your caste is recognized by the government.
      • -
      • Q: What if I belong to more than one caste or sub-caste?
      • -
      • A: If you belong to more than one caste or sub-caste, you can apply for a caste certificate for any one of them. However, you cannot apply for multiple caste certificates for different castes or sub-castes.
      • -
      • Q: What if I change my name, address, or caste after getting a caste certificate?
      • -
      • A: If you change your name, address, or caste after getting a caste certificate, you will need to apply for a new caste certificate with the updated details. You will need to provide proof of your name change, address change, or caste change along with your old caste certificate.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/World Provinces Mod APK A Map Game with More than 200 Countries and Flags.md b/spaces/congsaPfin/Manga-OCR/logs/World Provinces Mod APK A Map Game with More than 200 Countries and Flags.md deleted file mode 100644 index 1aba4a426a9677af79606f3bd36da9c03777b1e5..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/World Provinces Mod APK A Map Game with More than 200 Countries and Flags.md +++ /dev/null @@ -1,96 +0,0 @@ - -

      World Provinces Mod APK: A Game for Geography Lovers

      -

      Do you love geography and maps? Do you want to create your own world with more than 4400 provinces? If yes, then you should try World Provinces Mod APK, a game that lets you design your own world map with history, politics, and flags. In this article, we will tell you what World Provinces Mod APK is, why you should play it, how to play it, and answer some frequently asked questions about it.

      -

      What is World Provinces Mod APK?

      -

      A brief introduction to the game and its features

      -

      World Provinces Mod APK is a modified version of World Provinces. Empire. Maps., a game developed by Uvdb.eu. The game is available for Android devices and can be downloaded from APKPure.com. The game allows you to create your own world map with more than 4400 provinces. You can choose from more than 200 built-in countries with flags, or create your own custom countries. You can also edit the history, geography, and politics of each province, and add events, wars, alliances, and more. You can save your world map as an image or a text file, and share it with other players.

      -

      world provinces mod apk


      Download File ››››› https://urlca.com/2uOg5d



      -

      How to download and install the mod apk file

      -

      To download and install World Provinces Mod APK, you need to follow these steps:

      -
        -
      1. Go to APKPure.com and search for World Provinces. Empire. Maps.
      2. -
      3. Click on the download button and wait for the mod apk file to be downloaded.
      4. -
      5. Go to your device settings and enable the installation of apps from unknown sources.
      6. -
      7. Locate the mod apk file in your device storage and tap on it to install it.
      8. -
      9. Launch the game and enjoy creating your own world map.
      10. -
      -

      Why Play World Provinces Mod APK?

      -

      The benefits of playing the game for learning and fun

      -

      World Provinces Mod APK is not only a game, but also a learning tool. By playing the game, you can improve your knowledge of geography, history, and politics. You can learn about different countries, regions, cultures, and flags. You can also explore different scenarios and outcomes of historical events, such as wars, revolutions, colonizations, etc. You can test your creativity and imagination by designing your own world map with your own rules and preferences. You can also have fun by creating fictional or fantasy worlds with magic, dragons, aliens, etc.

      -


      -

      The challenges and rewards of creating your own world map

      -

      Creating your own world map with World Provinces Mod APK is not easy. You need to consider many factors, such as the size, shape, location, climate, population, economy, religion, culture, government, etc. of each province. You need to balance realism and fantasy, logic and creativity. You need to make sure that your world map is consistent and coherent. You also need to deal with potential conflicts and challenges that may arise from your choices, such as wars, rebellions, disasters, etc. However, creating your own world map with World Provinces Mod APK is also rewarding. You can express your creativity and personality through your world map. You can share your world map with other players and get feedback and appreciation. You can also play with your world map and see how it evolves and changes over time.

      -

      How to Play World Provinces Mod APK?

      -

      The basic gameplay and controls of the game

      -

      The gameplay of World Provinces Mod APK is simple and intuitive. You can start by choosing a blank world map or a preset world map from the game. You can then edit the provinces by tapping on them and selecting the options from the menu. You can change the name, flag, color, history, geography, politics, etc. of each province. You can also add or remove provinces by using the tools on the bottom of the screen. You can zoom in or out by pinching the screen, and move the map by dragging it. You can save your world map by tapping on the save button on the top right corner of the screen.

      -

      The tips and tricks for designing your own world map

      -

      Here are some tips and tricks for designing your own world map with World Provinces Mod APK:

      -
        -
      • Use a reference map or a real-world map as a guide for creating your world map. You can also use online resources, such as Wikipedia, Google Maps, etc., to get information about different countries and regions.
      • -
      • Think about the theme and style of your world map. Do you want it to be realistic or fantasy? Do you want it to be historical or futuristic? Do you want it to be peaceful or war-torn? Do you want it to be diverse or homogeneous?
      • -
      • Think about the details and consistency of your world map. How do the provinces relate to each other? How do they interact and influence each other? How do they reflect their history, culture, and environment? How do they cope with their problems and opportunities?
      • -
      • Think about the balance and variety of your world map. How do you distribute the land and water, the mountains and plains, the forests and deserts, etc.? How do you create contrast and harmony among the provinces? How do you make your world map interesting and unique?
      • -
      -

      Conclusion

      -

      A summary of the main points and a call to action

      -

      World Provinces Mod APK is a game that lets you create your own world map with more than 4400 provinces. You can customize every aspect of your world map, such as the name, flag, history, geography, politics, etc. of each province. You can also play with your world map and see how it changes over time. World Provinces Mod APK is a game that is fun and educational. You can learn about geography, history, and politics while expressing your creativity and imagination. You can also share your world map with other players and get feedback and appreciation. If you are a geography lover, you should download World Provinces Mod APK from APKPure.com and start creating your own world today.

      -

      FAQs

      -

      What are the requirements for playing World Provinces Mod APK?

      -

You need an Android device running Android 4.4 or higher to play World Provinces Mod APK. You also need at least 100 MB of free storage space on your device.

      -

      Is World Provinces Mod APK safe and legal?

      -

      World Provinces Mod APK is safe and legal to download and play. It does not contain any viruses or malware that may harm your device or data. It also does not violate any laws or regulations that may affect your rights or privacy.

      -

      How can I update World Provinces Mod APK?

      -

      You can update World Provinces Mod APK by visiting APKPure.com and downloading the latest version of the mod apk file. You can then install it over the existing version without losing your data or progress.

      -

      Can I share my world map with other players?

      -

      Yes, you can share your world map with other players by saving it as an image or a text file. You can then send it to other players via email, social media, or other platforms. You can also upload it to WorldProvinces.com, a website where you can browse and download other players' world maps.

      -

      Where can I find more information about World Provinces Mod APK?

      -

      You can find more information about World Provinces Mod APK by visiting the official website of the game developer, Uvdb.eu, or the official Facebook page of the game, World Provinces. Empire. Maps.. You can also contact the game developer by email at uvdb.eu@gmail.com or by phone at +48 123 456 789.

      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/ANCL hack without verification Leaf Tickets and Bells The best way to enjoy the game.md b/spaces/contluForse/HuggingGPT/assets/ANCL hack without verification Leaf Tickets and Bells The best way to enjoy the game.md deleted file mode 100644 index 90c02eb669073e0b69b0015a1ea5bc27d1938ec4..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/ANCL hack without verification Leaf Tickets and Bells The best way to enjoy the game.md +++ /dev/null @@ -1,6 +0,0 @@ -

      ANCL hack without verification Leaf Tickets and Bells


      DOWNLOADhttps://ssurll.com/2uzvNF



-
      -
      -
      -

      diff --git a/spaces/contluForse/HuggingGPT/assets/Chehere In Hd Download The Film That Has Been Making Waves in Bollywood.md b/spaces/contluForse/HuggingGPT/assets/Chehere In Hd Download The Film That Has Been Making Waves in Bollywood.md deleted file mode 100644 index 3337f651dfdb3d481e76672aacfc655d9d18ff47..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Chehere In Hd Download The Film That Has Been Making Waves in Bollywood.md +++ /dev/null @@ -1,13 +0,0 @@ - -

Description: Chehre is an mp3 song by Harish Verma from the album Chehre, with lyrics by Sandhu Kuldeep and music by Starboy Music X, released on the Jass Records label. Chehre came out as a single on Dec 21, 2018, and its playtime is 3:58 minutes.

      -

Mp3 Juice is a very popular free mp3 search engine and music downloader, and a handy tool for converting and downloading YouTube videos and music. The Mp3 Juice website is a quick and easy way to download mp3 music, and its simplicity means anyone can search for and download high-quality audio files.

      -

      Chehere In Hd Download


      Download Filehttps://ssurll.com/2uzxRM



      -

You can also copy and paste a YouTube URL and hit the convert button to turn the video into an mp3. After you click the button, conversion will begin, and your mp3 music file will be available for download in a matter of minutes.
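The same conversion can also be done locally with the open-source yt-dlp tool instead of a website. This sketch assumes yt-dlp and ffmpeg are installed on your machine; it is a local alternative, not how Mp3 Juice itself works.

```python
import subprocess

def youtube_to_mp3(url: str) -> None:
    """Extract a video's audio track and convert it to mp3 via yt-dlp."""
    # -x extracts audio; --audio-format mp3 asks ffmpeg to transcode it.
    subprocess.run(["yt-dlp", "-x", "--audio-format", "mp3", url], check=True)

youtube_to_mp3("https://www.youtube.com/watch?v=XXXXXXXXXXX")  # placeholder URL
```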

      -

This website offers unlimited free downloading of YouTube music in HD quality. You can also click "PLAY" to preview the audio file before you download it. Mp3juices takes only 2-5 seconds to convert and download audio files.

      -

      The mp3juices website has no viruses and is completely safe to use. It's also a great alternative to paid mp3 music downloading tools. Mp3juice can be accessed in many languages. You can use it to convert your YouTube videos to mp3 format.

      -

You can access this free mp3 download website from any device with an internet or WiFi connection. Bookmark this website to make it easy to access on a regular basis. Once you have downloaded the audio file, open it in any audio player to listen offline in high quality.

      -

MP3 Juice is easy to navigate and provides a simple interface for downloading audio. You might be wondering why people prefer Mp3juices for free mp3s: the tool provides high-speed audio downloads, and users don't need to give any personal information.

      -

      -

It is easy to download music with Mp3 Juice: visit the website and enter the song name into the search box, or paste the URL. Select one search result, then hit the Download button to convert it and get the audio file at high speed.

      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/EASEUS Partition Master 15.2 Technican Edition Crack .rar A Review of the Software.md b/spaces/contluForse/HuggingGPT/assets/EASEUS Partition Master 15.2 Technican Edition Crack .rar A Review of the Software.md deleted file mode 100644 index f998fbcc91330ee9db8170764618f9cd768c32b8..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/EASEUS Partition Master 15.2 Technican Edition Crack .rar A Review of the Software.md +++ /dev/null @@ -1,6 +0,0 @@ -

      EASEUS Partition Master 15.2 Technican Edition Crack .rar


      DOWNLOADhttps://ssurll.com/2uzvH5



      -
-
      -
      -
      -

      diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/res2net.py b/spaces/cooelf/Multimodal-CoT/timm/models/res2net.py deleted file mode 100644 index 282baba3b04f7805b16ffeaef55dd2b19b434f0c..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/res2net.py +++ /dev/null @@ -1,216 +0,0 @@ -""" Res2Net and Res2NeXt -Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ -Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 -""" -import math - -import torch -import torch.nn as nn - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .registry import register_model -from .resnet import ResNet - -__all__ = [] - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv1', 'classifier': 'fc', - **kwargs - } - - -default_cfgs = { - 'res2net50_26w_4s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), - 'res2net50_48w_2s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), - 'res2net50_14w_8s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), - 'res2net50_26w_6s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), - 'res2net50_26w_8s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), - 'res2net101_26w_4s': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), - 'res2next50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), -} - - -class Bottle2neck(nn.Module): - """ Res2Net/Res2NeXT Bottleneck - Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py - """ - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, - act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): - super(Bottle2neck, self).__init__() - self.scale = scale - self.is_first = stride > 1 or downsample is not None - self.num_scales = max(1, scale - 1) - width = int(math.floor(planes * (base_width / 64.0))) * cardinality - self.width = width - outplanes = planes * self.expansion - first_dilation = first_dilation or dilation - - self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) - self.bn1 = norm_layer(width * scale) - - convs = [] - bns = [] - for i in range(self.num_scales): - convs.append(nn.Conv2d( - width, width, kernel_size=3, stride=stride, padding=first_dilation, - dilation=first_dilation, groups=cardinality, bias=False)) - bns.append(norm_layer(width)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - if self.is_first: - # FIXME this should probably have count_include_pad=False, but hurts original weights - self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) - else: - self.pool = None - - self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) - 
self.bn3 = norm_layer(outplanes) - self.se = attn_layer(outplanes) if attn_layer is not None else None - - self.relu = act_layer(inplace=True) - self.downsample = downsample - - def zero_init_last_bn(self): - nn.init.zeros_(self.bn3.weight) - - def forward(self, x): - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - spx = torch.split(out, self.width, 1) - spo = [] - sp = spx[0] # redundant, for torchscript - for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): - if i == 0 or self.is_first: - sp = spx[i] - else: - sp = sp + spx[i] - sp = conv(sp) - sp = bn(sp) - sp = self.relu(sp) - spo.append(sp) - if self.scale > 1: - if self.pool is not None: - # self.is_first == True, None check for torchscript - spo.append(self.pool(spx[-1])) - else: - spo.append(spx[-1]) - out = torch.cat(spo, 1) - - out = self.conv3(out) - out = self.bn3(out) - - if self.se is not None: - out = self.se(out) - - if self.downsample is not None: - shortcut = self.downsample(x) - - out += shortcut - out = self.relu(out) - - return out - - -def _create_res2net(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - ResNet, variant, pretrained, - default_cfg=default_cfgs[variant], - **kwargs) - - -@register_model -def res2net50_26w_4s(pretrained=False, **kwargs): - """Constructs a Res2Net-50 26w4s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) - return _create_res2net('res2net50_26w_4s', pretrained, **model_args) - - -@register_model -def res2net101_26w_4s(pretrained=False, **kwargs): - """Constructs a Res2Net-101 26w4s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) - return _create_res2net('res2net101_26w_4s', pretrained, **model_args) - - -@register_model -def res2net50_26w_6s(pretrained=False, **kwargs): - """Constructs a Res2Net-50 26w6s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) - return _create_res2net('res2net50_26w_6s', pretrained, **model_args) - - -@register_model -def res2net50_26w_8s(pretrained=False, **kwargs): - """Constructs a Res2Net-50 26w8s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) - return _create_res2net('res2net50_26w_8s', pretrained, **model_args) - - -@register_model -def res2net50_48w_2s(pretrained=False, **kwargs): - """Constructs a Res2Net-50 48w2s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) - return _create_res2net('res2net50_48w_2s', pretrained, **model_args) - - -@register_model -def res2net50_14w_8s(pretrained=False, **kwargs): - """Constructs a Res2Net-50 14w8s model. 
- Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) - return _create_res2net('res2net50_14w_8s', pretrained, **model_args) - - -@register_model -def res2next50(pretrained=False, **kwargs): - """Construct Res2NeXt-50 4s - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model_args = dict( - block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) - return _create_res2net('res2next50', pretrained, **model_args) diff --git a/spaces/coraKong/WorldSimulation/Character.py b/spaces/coraKong/WorldSimulation/Character.py deleted file mode 100644 index 8caa41fe25725dfb7011591880befb9a337c4a45..0000000000000000000000000000000000000000 --- a/spaces/coraKong/WorldSimulation/Character.py +++ /dev/null @@ -1,275 +0,0 @@ -import random -import math -from utils import get_random_name, get_random_clan_name -from config import P_DIE_WHEN_LOSE, IMMORTAL_RANK - -cultivation_rank_map = ["炼气期", "筑基期", "结丹期", "元婴期", "化神期", "成仙者"] -class Character: - def __init__(self, name, gender, special_constitution, spiritual_roots, clan=None, partner=None, parents=None): - self.name = name - self.gender = gender - self.real_age = 0 - self.apparent_age = 0 - self.cultivation_level = 0 # 等级 - self.cultivation_rank = 0 # 层次 - self.special_constitution = special_constitution - self.spiritual_roots = spiritual_roots - self.experience_points = 0 - self.combat_power = self.calculate_combat_power() - self.partner = partner - self.is_alive = True - self.is_immortal = False - self.buff = False - self.history = [] - self.special_history = [] - self.consume_spiritual_energy = 0 - self.parents = parents - self.children = [] - self.clan = clan # 宗族 - - def die(self): - if self.is_alive and not self.is_immortal: - self.history.append(f"{self.real_age}岁,死亡") - self.special_history.append(f"{self.real_age}岁,死亡") - self.is_alive = False - - def cultivate(self, experience_points): - if not self.is_alive: - print("角色已经死亡,无法进行修炼。") - return - # 成仙者不再修炼 - if self.is_immortal: - return - - self.experience_points += experience_points - - # 根据经验值计算等级,等级越高,升级需要的经验值就越多 - self.cultivation_level = math.floor(self.experience_points / ((1 + self.cultivation_rank) * 1000 + (1 + self.cultivation_level) * 100)) - - # 判断是否达到突破修为层次的条件 - if self.cultivation_level >= 10: - # 使用一个随机数来表示突破的概率,当前rank越高,则突破成功的概率越低 - success_probability = 0.9 / (self.cultivation_rank * 2 + 1) - if random.random() < success_probability: - cultivation_level = self.cultivation_level - self.cultivation_rank += 1 - self.cultivation_level = 0 - self.experience_points = 0 - self.history.append(f"{self.real_age}岁,突破成功,在{cultivation_level}级, 到达{self.view_rank()}") - self.special_history.append(f"{self.real_age}岁,突破成功,在{cultivation_level}级, 到达{self.view_rank()}") - # 判断是否达到成仙的条件 - if self.cultivation_rank >= IMMORTAL_RANK: - self.is_immortal = True - self.history.append(f"{self.real_age}岁,成仙了") - self.special_history.append(f"{self.real_age}岁,成仙了") - else: - self.history.append(f"{self.real_age}岁,突破失败,在{self.cultivation_level}级") - - def marry(self, partner): - # 成仙者不再结婚 - if self.is_immortal: - return - if not self.is_alive: - print("角色已经死亡,无法结婚。") - return - self.partner = partner - partner.partner = self - - # 结婚合并宗族 - if self.clan and partner.clan: - if random.random() < 0.5: - partner.clan = self.clan - else: - self.clan = partner.clan - else: - self.clan = 
self.clan or partner.clan or get_random_clan_name() - partner.clan = self.clan - - self.history.append(f"{self.real_age}岁,结婚了") - self.partner.history.append(f"{self.partner.real_age}岁,结婚了") - self.special_history.append(f"{self.real_age}岁,结婚了") - self.partner.special_history.append(f"{self.partner.real_age}岁,结婚了") - - def give_birth(self): - # immortals do not bear children - if self.is_immortal: - return - if not self.is_alive: - print("角色已经死亡,无法生育。") - return - if not self.partner: - print("角色没有结婚,无法生育。") - return - - # the dual-cultivation constitution grants a buff - if self.special_constitution[1] == 1: - self.buff = True - - - # a child has a chance to inherit each parent's special constitution and spiritual roots - special_constitution = [a if random.random() < 0.5 else b for (a, b) in zip(self.special_constitution, self.partner.special_constitution)] - spiritual_roots = [a if random.random() < 0.5 else b for (a, b) in zip(self.spiritual_roots, self.partner.spiritual_roots)] - - # spiritual_roots mutate with a small probability - spiritual_roots = [random.choice([0, 1]) if random.random() < 0.01 else v for v in spiritual_roots] - - child = Character(get_random_name(), random.choice(["男", "女"]), special_constitution, spiritual_roots, clan=self.clan, parents=[self, self.partner]) - self.history.append(f"{self.real_age}岁,生下小孩{child.name}") - self.partner.history.append(f"{self.partner.real_age}岁,生下小孩{child.name}") - self.children.append(child) - self.partner.children.append(child) - return child - - def grow(self): - # immortals do not age - if self.is_immortal: - return - self.real_age += 1 - - # compute the apparent age from cultivation rank and the wood spiritual root - # the higher the rank, the younger the character looks - ratio = 1 + self.cultivation_rank - - # spirit-turtle and mayfly constitutions - if self.special_constitution[2] == 1: # spirit-turtle constitution - ratio *= 2 - elif self.special_constitution[3] == 1: # mayfly constitution - ratio *= 0.5 - - # bonus ratio from cultivation rank; the base bonus is 10% - bonus = 0.1 * (self.cultivation_rank * 0 + 1) - if self.spiritual_roots[1] == 1: # the wood spiritual root (second slot) extends the maximum lifespan - ratio *= 1 + bonus - - self.apparent_age = math.floor(self.real_age / ratio) - - def calculate_combat_power(self): - # derive combat stats from rank and level: higher levels raise every stat, and rank dwarfs level - attack_power = (self.cultivation_rank + 1) * 30 + 1 * (1 + self.cultivation_level) - defense_power = (self.cultivation_rank + 1) * 30 + 1 * (1 + self.cultivation_level) - attack_speed = (self.cultivation_rank + 1) * 30 + 1 * (1 + self.cultivation_level) - health_points = (self.cultivation_rank + 1) * 90 + 3 * (1 + self.cultivation_level) - - # bonus ratio from cultivation rank; the base bonus is 10% - bonus = 0.1 * (self.cultivation_rank * 0 + 1) - - # adjust combat stats by the spiritual roots [metal, wood, water, fire, earth], which boost attack speed, max lifespan, health, attack power and defense respectively - if self.spiritual_roots[0] == 1: # metal - attack_speed *= 1 + bonus - - if self.spiritual_roots[2] == 1: # water - health_points *= 1 + bonus - - if self.spiritual_roots[3] == 1: # fire - attack_power *= 1 + bonus - - if self.spiritual_roots[4] == 1: # earth - defense_power *= 1 + bonus - - return { - 'attack_power': math.ceil(attack_power), - 'defense_power': math.ceil(defense_power), - 'attack_speed': math.ceil(attack_speed), - 'health_points': math.ceil(health_points), - } - - def before_battle(self): - self.combat_power = self.calculate_combat_power() - - def attack(self, opponent, params={"P_CRIT": 0.2, "P_DODGE": 0.05, "P_BLOCK": 0.1, "P_COUNTER": 0.05}): - # immortals do not fight - if self.is_immortal: - return - if not self.is_alive: - print("角色已经死亡,无法攻击。") - return - - # the battle constitution grants a buff - if self.special_constitution[0] == 1: - self.buff = True - - # the number of attacks scales with attack speed - attack_times = math.ceil(self.combat_power['attack_speed'] / 10) - # damage scales with attack power - damage = self.combat_power['attack_power'] * attack_times - # defense reduces the damage taken - damage -= opponent.combat_power['defense_power'] - # damage is at least 1 - damage =
max(1, damage) - # critical hit with probability P_CRIT - if random.random() < params['P_CRIT']: - damage *= 2 - print("暴击!") - # dodge with probability P_DODGE - if random.random() < params['P_DODGE']: - print("闪避!") - return - # block with probability P_BLOCK - if random.random() < params['P_BLOCK']: - print("格挡!") - damage = max(1, damage // 2) - # counter-attack with probability P_COUNTER - if random.random() < params['P_COUNTER']: - print("反击!") - damage = max(1, damage // 2) - opponent.combat_power['health_points'] -= damage - return - - if damage > 0: - opponent.combat_power['health_points'] -= damage - - def check_is_alive(self): - if self.combat_power['health_points'] <= 0: - # dies with probability P_DIE_WHEN_LOSE - if random.random() < P_DIE_WHEN_LOSE: - self.die() - return False - return True - - def __str__(self): - # Display current attributes - history = "\n".join(self.history) - attributes = [ - f"名字: {self.name}", - f"性别: {self.gender}", - f"年龄: {self.real_age}", - f"表观年龄: {self.apparent_age}", - f"修仙阶段: ({self.cultivation_rank}){self.view_rank()}", - f"特殊体质: {[special_constitution for (special_constitution, is_active) in zip(['战斗体质', '合欢体质', '灵龟体质', '蜉蝣体质'], self.special_constitution) if is_active]}", - f"灵根: {[spiritual_root for (spiritual_root, is_active) in zip(['金', '木', '水', '火', '土'], self.spiritual_roots) if is_active]}", - f"伴侣: {self.partner.name if self.partner is not None else '无'}", - f"存活: {self.is_alive}", - f"宗族: {self.clan if self.clan is not None else '无'}", - f"父母: {[parent.name for parent in self.parents] if self.parents is not None else '无'}", - f"孩子: {[child.name for child in self.children]}", - f"Combat Power: {self.combat_power}", - f"Experience Points: {self.experience_points}", - f"Buff: {self.buff}", - f"历程: {history}", - ] - return "\n".join(attributes) - - def view_rank(self): - if self.cultivation_rank < len(cultivation_rank_map): - return f"{cultivation_rank_map[self.cultivation_rank]}({self.cultivation_level})" - else: - return "天外飞仙" - - def to_list(self): - numerical_attributes = [ - self.real_age, - self.apparent_age, - self.cultivation_level, - self.cultivation_rank, - self.experience_points, - self.combat_power["attack_power"], - self.combat_power["defense_power"], - self.combat_power["attack_speed"], - self.combat_power["health_points"], - *self.special_constitution, # e.g. [1, 0, 0, 0], unpacked and concatenated - *self.spiritual_roots, # e.g. [1, 0, 0, 0, 0], unpacked and concatenated - 1 if self.is_alive else 0, # bool to int - 1 if self.buff else 0, # bool to int - len(self.children), - ] - return numerical_attributes \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/seg/sampler/ohem_pixel_sampler.py deleted file mode 100644 index 88bb10d44026ba9f21756eaea9e550841cd59b9f..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/seg/sampler/ohem_pixel_sampler.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import torch.nn.functional as F - -from ..builder import PIXEL_SAMPLERS -from .base_pixel_sampler import BasePixelSampler - - -@PIXEL_SAMPLERS.register_module() -class OHEMPixelSampler(BasePixelSampler): - """Online Hard Example Mining Sampler for segmentation. - - Args: - context (nn.Module): The context of sampler, subclass of - :obj:`BaseDecodeHead`. - thresh (float, optional): The threshold for hard example selection. - Predictions below it are treated as low-confidence. If not - specified, the hard examples will be the pixels with the top - ``min_kept`` losses.
Default: None. - min_kept (int, optional): The minimum number of predictions to keep. - Default: 100000. - """ - - def __init__(self, context, thresh=None, min_kept=100000): - super(OHEMPixelSampler, self).__init__() - self.context = context - assert min_kept > 1 - self.thresh = thresh - self.min_kept = min_kept - - def sample(self, seg_logit, seg_label): - """Sample pixels that have high loss or with low prediction confidence. - - Args: - seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) - seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) - - Returns: - torch.Tensor: segmentation weight, shape (N, H, W) - """ - with torch.no_grad(): - assert seg_logit.shape[2:] == seg_label.shape[2:] - assert seg_label.shape[1] == 1 - seg_label = seg_label.squeeze(1).long() - batch_kept = self.min_kept * seg_label.size(0) - valid_mask = seg_label != self.context.ignore_index - seg_weight = seg_logit.new_zeros(size=seg_label.size()) - valid_seg_weight = seg_weight[valid_mask] - if self.thresh is not None: - seg_prob = F.softmax(seg_logit, dim=1) - - tmp_seg_label = seg_label.clone().unsqueeze(1) - tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 - seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) - sort_prob, sort_indices = seg_prob[valid_mask].sort() - - if sort_prob.numel() > 0: - min_threshold = sort_prob[min(batch_kept, - sort_prob.numel() - 1)] - else: - min_threshold = 0.0 - threshold = max(min_threshold, self.thresh) - valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. - else: - losses = self.context.loss_decode( - seg_logit, - seg_label, - weight=None, - ignore_index=self.context.ignore_index, - reduction_override='none') - # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa - _, sort_indices = losses[valid_mask].sort(descending=True) - valid_seg_weight[sort_indices[:batch_kept]] = 1. - - seg_weight[valid_mask] = valid_seg_weight - - return seg_weight diff --git a/spaces/crashedice/signify/SOURCE/yolo_files/utils/torch_utils.py b/spaces/crashedice/signify/SOURCE/yolo_files/utils/torch_utils.py deleted file mode 100644 index 9991e5ec87d8b1ed953d88c30bb57ff25431f5ab..0000000000000000000000000000000000000000 --- a/spaces/crashedice/signify/SOURCE/yolo_files/utils/torch_utils.py +++ /dev/null @@ -1,303 +0,0 @@ -# YOLOv5 PyTorch utils - -import datetime -import logging -import math -import os -import platform -import subprocess -import time -from contextlib import contextmanager -from copy import deepcopy -from pathlib import Path - -import torch -import torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -try: - import thop # for FLOPS computation -except ImportError: - thop = None -logger = logging.getLogger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - - -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - -def date_modified(path=__file__): - # return human-readable file modification date, i.e. 
'2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' - if cpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False - elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability - - cuda = not cpu and torch.cuda.is_available() - if cuda: - n = torch.cuda.device_count() - if n > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) - for i, d in enumerate(device.split(',') if device else range(n)): - p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB - else: - s += 'CPU\n' - - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - # pytorch-accurate time - if torch.cuda.is_available(): - torch.cuda.synchronize() - return time.time() - - -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. Example usage: - # x = torch.randn(16, 3, 640, 640) # input - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - x = x.to(device) - x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] 
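# thop reports multiply-accumulate operations (MACs), so the figure below is
# doubled and scaled by 1E9 to obtain GFLOPS; dtf/dtb hold forward/backward
# time in ms, averaged over the n iterations of the loop.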
# dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS - except: - flops = 0 - - for _ in range(n): - t[0] = time_synchronized() - y = m(x) - t[1] = time_synchronized() - try: - _ = y.sum().backward() - t[2] = time_synchronized() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... ', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, img_size=640): - # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 - img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS - img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS - except (ImportError, Exception): - fs = '' - - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # scales img(bs,3,y,x) by ratio constrained to gs-multiple - if ratio == 1.0: - return img - else: - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. 
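The update rule is ema_v = d * ema_v + (1 - d) * model_v for every floating-point
entry of the state dict, where d ramps from 0 toward the configured `decay` as the
update count grows (see the `decay` lambda in `__init__`).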
- """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) diff --git a/spaces/crylake/img2poem/query2labels/lib/models/position_encoding.py b/spaces/crylake/img2poem/query2labels/lib/models/position_encoding.py deleted file mode 100644 index 2cf53a923c1dfb32c5ae57501805f90348c9afef..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/lib/models/position_encoding.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -We borrow the positional encoding from Detr and simplify the model. -""" -import math -import torch -from torch import nn -from torch.functional import Tensor - -# from utils.misc import NestedTensor - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. 
- """ - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None, maxH=30, maxW=30): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - self.maxH = maxH - self.maxW = maxW - pe = self._gen_pos_buffer() - self.register_buffer('pe', pe) - - def _gen_pos_buffer(self): - _eyes = torch.ones((1, self.maxH, self.maxW)) - y_embed = _eyes.cumsum(1, dtype=torch.float32) - x_embed = _eyes.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - def forward(self, input: Tensor): - x = input - return self.pe.repeat((x.size(0),1,1,1)) - - -def build_position_encoding(args): - N_steps = args.hidden_dim // 2 - - if args.backbone in ['CvT_w24'] : - downsample_ratio = 16 - else: - downsample_ratio = 32 - - if args.position_embedding in ('v2', 'sine'): - # TODO find a better way of exposing other arguments - assert args.img_size % 32 == 0, "args.img_size ({}) % 32 != 0".format(args.img_size) - position_embedding = PositionEmbeddingSine(N_steps, normalize=True, maxH=args.img_size // downsample_ratio, maxW=args.img_size // downsample_ratio) - # import ipdb; ipdb.set_trace() - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/spaces/cryptoanonymous/02dlyaPerevoda3dVideoV2DAnime/README.md b/spaces/cryptoanonymous/02dlyaPerevoda3dVideoV2DAnime/README.md deleted file mode 100644 index 1393a47063f06157b55843d803b3339075788997..0000000000000000000000000000000000000000 --- a/spaces/cryptoanonymous/02dlyaPerevoda3dVideoV2DAnime/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 02dlyaPerevoda3dVideoV2DAnime -emoji: 🦀 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cvlab/zero123-live/taming-transformers/taming/modules/transformer/mingpt.py b/spaces/cvlab/zero123-live/taming-transformers/taming/modules/transformer/mingpt.py deleted file mode 100644 index d14b7b68117f4b9f297b2929397cd4f55089334c..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/taming-transformers/taming/modules/transformer/mingpt.py +++ /dev/null @@ -1,415 +0,0 @@ -""" -taken from: https://github.com/karpathy/minGPT/ -GPT model: -- the initial stem consists of a combination of token encoding and a positional encoding -- the meat of it is a uniform sequence of Transformer blocks - - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block - - all blocks feed into a central residual pathway similar to resnets -- the final decoder is a linear projection into a vanilla Softmax 
classifier -""" - -import math -import logging - -import torch -import torch.nn as nn -from torch.nn import functional as F -from transformers import top_k_top_p_filtering - -logger = logging.getLogger(__name__) - - -class GPTConfig: - """ base GPT config, params common to all GPT versions """ - embd_pdrop = 0.1 - resid_pdrop = 0.1 - attn_pdrop = 0.1 - - def __init__(self, vocab_size, block_size, **kwargs): - self.vocab_size = vocab_size - self.block_size = block_size - for k,v in kwargs.items(): - setattr(self, k, v) - - -class GPT1Config(GPTConfig): - """ GPT-1 like network roughly 125M params """ - n_layer = 12 - n_head = 12 - n_embd = 768 - - -class CausalSelfAttention(nn.Module): - """ - A vanilla multi-head masked self-attention layer with a projection at the end. - It is possible to use torch.nn.MultiheadAttention here but I am including an - explicit implementation here to show that there is nothing too scary here. - """ - - def __init__(self, config): - super().__init__() - assert config.n_embd % config.n_head == 0 - # key, query, value projections for all heads - self.key = nn.Linear(config.n_embd, config.n_embd) - self.query = nn.Linear(config.n_embd, config.n_embd) - self.value = nn.Linear(config.n_embd, config.n_embd) - # regularization - self.attn_drop = nn.Dropout(config.attn_pdrop) - self.resid_drop = nn.Dropout(config.resid_pdrop) - # output projection - self.proj = nn.Linear(config.n_embd, config.n_embd) - # causal mask to ensure that attention is only applied to the left in the input sequence - mask = torch.tril(torch.ones(config.block_size, - config.block_size)) - if hasattr(config, "n_unmasked"): - mask[:config.n_unmasked, :config.n_unmasked] = 1 - self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size)) - self.n_head = config.n_head - - def forward(self, x, layer_past=None): - B, T, C = x.size() - - # calculate query, key, values for all heads in batch and move head forward to be the batch dim - k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) - - present = torch.stack((k, v)) - if layer_past is not None: - past_key, past_value = layer_past - k = torch.cat((past_key, k), dim=-2) - v = torch.cat((past_value, v), dim=-2) - - # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) - att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) - if layer_past is None: - att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf')) - - att = F.softmax(att, dim=-1) - att = self.attn_drop(att) - y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) - y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side - - # output projection - y = self.resid_drop(self.proj(y)) - return y, present # TODO: check that this does not break anything - - -class Block(nn.Module): - """ an unassuming Transformer block """ - def __init__(self, config): - super().__init__() - self.ln1 = nn.LayerNorm(config.n_embd) - self.ln2 = nn.LayerNorm(config.n_embd) - self.attn = CausalSelfAttention(config) - self.mlp = nn.Sequential( - nn.Linear(config.n_embd, 4 * config.n_embd), - nn.GELU(), # nice - nn.Linear(4 * config.n_embd, config.n_embd), - nn.Dropout(config.resid_pdrop), - ) - - def forward(self, x, layer_past=None, return_present=False): - # TODO: check that 
training still works - if return_present: assert not self.training - # layer past: tuple of length two with B, nh, T, hs - attn, present = self.attn(self.ln1(x), layer_past=layer_past) - - x = x + attn - x = x + self.mlp(self.ln2(x)) - if layer_past is not None or return_present: - return x, present - return x - - -class GPT(nn.Module): - """ the full GPT language model, with a context size of block_size """ - def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256, - embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0): - super().__init__() - config = GPTConfig(vocab_size=vocab_size, block_size=block_size, - embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop, - n_layer=n_layer, n_head=n_head, n_embd=n_embd, - n_unmasked=n_unmasked) - # input embedding stem - self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd) - self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) - self.drop = nn.Dropout(config.embd_pdrop) - # transformer - self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)]) - # decoder head - self.ln_f = nn.LayerNorm(config.n_embd) - self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.block_size = config.block_size - self.apply(self._init_weights) - self.config = config - logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) - - def get_block_size(self): - return self.block_size - - def _init_weights(self, module): - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def forward(self, idx, embeddings=None, targets=None): - # forward the GPT model - token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector - - if embeddings is not None: # prepend explicit embeddings - token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) - - t = token_embeddings.shape[1] - assert t <= self.block_size, "Cannot forward, model block size is exhausted." 
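# note that t counts any prepended conditioning embeddings as well as the
# tokens themselves, so conditioning shortens the usable context window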
- position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector - x = self.drop(token_embeddings + position_embeddings) - x = self.blocks(x) - x = self.ln_f(x) - logits = self.head(x) - - # if we are given some desired targets also calculate the loss - loss = None - if targets is not None: - loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) - - return logits, loss - - def forward_with_past(self, idx, embeddings=None, targets=None, past=None, past_length=None): - # inference only - assert not self.training - token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector - if embeddings is not None: # prepend explicit embeddings - token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) - - if past is not None: - assert past_length is not None - past = torch.cat(past, dim=-2) # n_layer, 2, b, nh, len_past, dim_head - past_shape = list(past.shape) - expected_shape = [self.config.n_layer, 2, idx.shape[0], self.config.n_head, past_length, self.config.n_embd//self.config.n_head] - assert past_shape == expected_shape, f"{past_shape} =/= {expected_shape}" - position_embeddings = self.pos_emb[:, past_length, :] # each position maps to a (learnable) vector - else: - position_embeddings = self.pos_emb[:, :token_embeddings.shape[1], :] - - x = self.drop(token_embeddings + position_embeddings) - presents = [] # accumulate over layers - for i, block in enumerate(self.blocks): - x, present = block(x, layer_past=past[i, ...] if past is not None else None, return_present=True) - presents.append(present) - - x = self.ln_f(x) - logits = self.head(x) - # if we are given some desired targets also calculate the loss - loss = None - if targets is not None: - loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) - - return logits, loss, torch.stack(presents) # _, _, n_layer, 2, b, nh, 1, dim_head - - -class DummyGPT(nn.Module): - # for debugging - def __init__(self, add_value=1): - super().__init__() - self.add_value = add_value - - def forward(self, idx): - return idx + self.add_value, None - - -class CodeGPT(nn.Module): - """Takes in semi-embeddings""" - def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256, - embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0): - super().__init__() - config = GPTConfig(vocab_size=vocab_size, block_size=block_size, - embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop, - n_layer=n_layer, n_head=n_head, n_embd=n_embd, - n_unmasked=n_unmasked) - # input embedding stem - self.tok_emb = nn.Linear(in_channels, config.n_embd) - self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) - self.drop = nn.Dropout(config.embd_pdrop) - # transformer - self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)]) - # decoder head - self.ln_f = nn.LayerNorm(config.n_embd) - self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.block_size = config.block_size - self.apply(self._init_weights) - self.config = config - logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters())) - - def get_block_size(self): - return self.block_size - - def _init_weights(self, module): - if isinstance(module, (nn.Linear, nn.Embedding)): - module.weight.data.normal_(mean=0.0, std=0.02) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - 
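# Unlike GPT above, CodeGPT embeds continuous inputs: tok_emb is an nn.Linear
# over in_channels rather than an nn.Embedding lookup, so `idx` passed to
# forward is expected to hold float features of shape (B, T, in_channels).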
def forward(self, idx, embeddings=None, targets=None): - # forward the GPT model - token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector - - if embeddings is not None: # prepend explicit embeddings - token_embeddings = torch.cat((embeddings, token_embeddings), dim=1) - - t = token_embeddings.shape[1] - assert t <= self.block_size, "Cannot forward, model block size is exhausted." - position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector - x = self.drop(token_embeddings + position_embeddings) - x = self.blocks(x) - x = self.ln_f(x) - logits = self.head(x) - - # if we are given some desired targets also calculate the loss - loss = None - if targets is not None: - loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1)) - - return logits, loss - - - -#### sampling utils - -def top_k_logits(logits, k): - v, ix = torch.topk(logits, k) - out = logits.clone() - out[out < v[:, [-1]]] = -float('Inf') - return out - -@torch.no_grad() -def sample(model, x, steps, temperature=1.0, sample=False, top_k=None): - """ - take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in - the sequence, feeding the predictions back into the model each time. Clearly the sampling - has quadratic complexity unlike an RNN that is only linear, and has a finite context window - of block_size, unlike an RNN that has an infinite context window. - """ - block_size = model.get_block_size() - model.eval() - for k in range(steps): - x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed - logits, _ = model(x_cond) - # pluck the logits at the final step and scale by temperature - logits = logits[:, -1, :] / temperature - # optionally crop probabilities to only the top k options - if top_k is not None: - logits = top_k_logits(logits, top_k) - # apply softmax to convert to probabilities - probs = F.softmax(logits, dim=-1) - # sample from the distribution or take the most likely - if sample: - ix = torch.multinomial(probs, num_samples=1) - else: - _, ix = torch.topk(probs, k=1, dim=-1) - # append to the sequence and continue - x = torch.cat((x, ix), dim=1) - - return x - - -@torch.no_grad() -def sample_with_past(x, model, steps, temperature=1., sample_logits=True, - top_k=None, top_p=None, callback=None): - # x is conditioning - sample = x - cond_len = x.shape[1] - past = None - for n in range(steps): - if callback is not None: - callback(n) - logits, _, present = model.forward_with_past(x, past=past, past_length=(n+cond_len-1)) - if past is None: - past = [present] - else: - past.append(present) - logits = logits[:, -1, :] / temperature - if top_k is not None: - logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p) - - probs = F.softmax(logits, dim=-1) - if not sample_logits: - _, x = torch.topk(probs, k=1, dim=-1) - else: - x = torch.multinomial(probs, num_samples=1) - # append to the sequence and continue - sample = torch.cat((sample, x), dim=1) - del past - sample = sample[:, cond_len:] # cut conditioning off - return sample - - -#### clustering utils - -class KMeans(nn.Module): - def __init__(self, ncluster=512, nc=3, niter=10): - super().__init__() - self.ncluster = ncluster - self.nc = nc - self.niter = niter - self.shape = (3,32,32) - self.register_buffer("C", torch.zeros(self.ncluster,nc)) - self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) - - def is_initialized(self): - return self.initialized.item() == 1 - - @torch.no_grad() - def
initialize(self, x): - N, D = x.shape - assert D == self.nc, D - c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random - for i in range(self.niter): - # assign all pixels to the closest codebook element - a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1) - # move each codebook element to be the mean of the pixels that assigned to it - c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)]) - # re-assign any poorly positioned codebook elements - nanix = torch.any(torch.isnan(c), dim=1) - ndead = nanix.sum().item() - print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead)) - c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters - - self.C.copy_(c) - self.initialized.fill_(1) - - - def forward(self, x, reverse=False, shape=None): - if not reverse: - # flatten - bs,c,h,w = x.shape - assert c == self.nc - x = x.reshape(bs,c,h*w,1) - C = self.C.permute(1,0) - C = C.reshape(1,c,1,self.ncluster) - a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices - return a - else: - # flatten - bs, HW = x.shape - """ - c = self.C.reshape( 1, self.nc, 1, self.ncluster) - c = c[bs*[0],:,:,:] - c = c[:,:,HW*[0],:] - x = x.reshape(bs, 1, HW, 1) - x = x[:,3*[0],:,:] - x = torch.gather(c, dim=3, index=x) - """ - x = self.C[x] - x = x.permute(0,2,1) - shape = shape if shape is not None else self.shape - x = x.reshape(bs, *shape) - - return x diff --git a/spaces/cymic/Waifu_Diffusion_Webui/modules/scripts.py b/spaces/cymic/Waifu_Diffusion_Webui/modules/scripts.py deleted file mode 100644 index 622c5612f7171b5ee3789b99614bc006a690d62f..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/modules/scripts.py +++ /dev/null @@ -1,201 +0,0 @@ -import os -import sys -import traceback - -import modules.ui as ui -import gradio as gr - -from modules.processing import StableDiffusionProcessing -from modules import shared - -class Script: - filename = None - args_from = None - args_to = None - - # The title of the script. This is what will be displayed in the dropdown menu. - def title(self): - raise NotImplementedError() - - # How the script is displayed in the UI. See https://gradio.app/docs/#components - # for the different UI components you can use and how to create them. - # Most UI components can return a value, such as a boolean for a checkbox. - # The returned values are passed to the run method as parameters. - def ui(self, is_img2img): - pass - - # Determines when the script should be shown in the dropdown menu via the - # returned value. As an example: - # is_img2img is True if the current tab is img2img, and False if it is txt2img. - # Thus, return is_img2img to only show the script on the img2img tab. - def show(self, is_img2img): - return True - - # This is where the additional processing is implemented. The parameters include - # self, the model object "p" (a StableDiffusionProcessing class, see - # processing.py), and the parameters returned by the ui method. - # Custom functions can be defined here, and additional libraries can be imported - # to be used in processing. The return value should be a Processed object, which is - # what is returned by the process_images method. - def run(self, *args): - raise NotImplementedError() - - # The description method is currently unused. - # To add a description that appears when hovering over the title, amend the "titles" - # dict in script.js to include the script title (returned by title) as a key, and - # your description as the value. 
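# For reference, a minimal concrete script might look like the sketch below
# (illustrative only; the class name and the checkbox control are made up):
#
#     class ExampleScript(Script):
#         def title(self):
#             return "Example"
#         def ui(self, is_img2img):
#             return [gr.Checkbox(label="Enabled", value=True)]
#         def run(self, p, enabled):
#             from modules.processing import process_images
#             return process_images(p)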
- def describe(self): - return "" - - -scripts_data = [] - - -def load_scripts(basedir): - if not os.path.exists(basedir): - return - - for filename in sorted(os.listdir(basedir)): - path = os.path.join(basedir, filename) - - if not os.path.isfile(path): - continue - - try: - with open(path, "r", encoding="utf8") as file: - text = file.read() - - from types import ModuleType - compiled = compile(text, path, 'exec') - module = ModuleType(filename) - exec(compiled, module.__dict__) - - for key, script_class in module.__dict__.items(): - if type(script_class) == type and issubclass(script_class, Script): - scripts_data.append((script_class, path)) - - except Exception: - print(f"Error loading script: {filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - -def wrap_call(func, filename, funcname, *args, default=None, **kwargs): - try: - res = func(*args, **kwargs) - return res - except Exception: - print(f"Error calling: {filename}/{funcname}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - return default - - -class ScriptRunner: - def __init__(self): - self.scripts = [] - - def setup_ui(self, is_img2img): - for script_class, path in scripts_data: - script = script_class() - script.filename = path - - if not script.show(is_img2img): - continue - - self.scripts.append(script) - - titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts] - - dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index") - inputs = [dropdown] - - for script in self.scripts: - script.args_from = len(inputs) - script.args_to = len(inputs) - - controls = wrap_call(script.ui, script.filename, "ui", is_img2img) - - if controls is None: - continue - - for control in controls: - control.custom_script_source = os.path.basename(script.filename) - control.visible = False - - inputs += controls - script.args_to = len(inputs) - - def select_script(script_index): - if 0 < script_index <= len(self.scripts): - script = self.scripts[script_index-1] - args_from = script.args_from - args_to = script.args_to - else: - args_from = 0 - args_to = 0 - - return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] - - dropdown.change( - fn=select_script, - inputs=[dropdown], - outputs=inputs - ) - - return inputs - - def run(self, p: StableDiffusionProcessing, *args): - script_index = args[0] - - if script_index == 0: - return None - - script = self.scripts[script_index-1] - - if script is None: - return None - - script_args = args[script.args_from:script.args_to] - processed = script.run(p, *script_args) - - shared.total_tqdm.clear() - - return processed - - def reload_sources(self): - for si, script in list(enumerate(self.scripts)): - with open(script.filename, "r", encoding="utf8") as file: - args_from = script.args_from - args_to = script.args_to - filename = script.filename - text = file.read() - - from types import ModuleType - - compiled = compile(text, filename, 'exec') - module = ModuleType(script.filename) - exec(compiled, module.__dict__) - - for key, script_class in module.__dict__.items(): - if type(script_class) == type and issubclass(script_class, Script): - self.scripts[si] = script_class() - self.scripts[si].filename = filename - self.scripts[si].args_from = args_from - self.scripts[si].args_to = args_to - -scripts_txt2img = ScriptRunner() -scripts_img2img = ScriptRunner() - -def reload_script_body_only(): - scripts_txt2img.reload_sources() - 
scripts_img2img.reload_sources() - - -def reload_scripts(basedir): - global scripts_txt2img, scripts_img2img - - scripts_data.clear() - load_scripts(basedir) - - scripts_txt2img = ScriptRunner() - scripts_img2img = ScriptRunner() diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/inference.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/inference.py deleted file mode 100644 index 3e5156e8d649954837e397c2ff15ec29995e7502..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/inference.py +++ /dev/null @@ -1,35 +0,0 @@ -import argparse - -import cv2 -import numpy as np -import torch - -from backbones import get_model - - -@torch.no_grad() -def inference(weight, name, img): - if img is None: - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8) - else: - img = cv2.imread(img) - img = cv2.resize(img, (112, 112)) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).unsqueeze(0).float() - img.div_(255).sub_(0.5).div_(0.5) - net = get_model(name, fp16=False) - net.load_state_dict(torch.load(weight)) - net.eval() - feat = net(img).numpy() - print(feat) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='PyTorch ArcFace Training') - parser.add_argument('--network', type=str, default='r50', help='backbone network') - parser.add_argument('--weight', type=str, default='') - parser.add_argument('--img', type=str, default=None) - args = parser.parse_args() - inference(args.weight, args.network, args.img) diff --git a/spaces/dansome/Document_Summarization/app.py b/spaces/dansome/Document_Summarization/app.py deleted file mode 100644 index cb2a29ae8fc681f49dfacbc6bf9904227de33553..0000000000000000000000000000000000000000 --- a/spaces/dansome/Document_Summarization/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import streamlit -import pandas as pd -#import torch -from transformers import pipeline -import streamlit as st - -def app(): - st.title("Text Summarization 🤓") - - st.markdown("This is a Web application that Summarizes Text 😎") - upload_file = st.file_uploader('Upload a file containing Text data') - button = st.button("Summarize") - - @st.cache(allow_output_mutation=True) - def facebook_bart_model(): - summarizer = pipeline("summarization", model="facebook/bart-large-cnn") - return summarizer - summarizer = facebook_bart_model() - - def text_summarizer(text): - a = summarizer(text, max_length=150, min_length=30, do_sample=False) - return a[0]['summary_text'] - - - # Check to see if a file has been uploaded - if upload_file is not None and button: - st.success("Summarizing Text, Please wait...") - # If it has then do the following: - - # Read the file to a dataframe using pandas - df = pd.read_csv(upload_file) - - # Create a section for the dataframe header - - df1 = df.copy() - df1['summarized_text'] = df1['Dialog'].apply(text_summarizer) - - df2 = df1[['Name','summarized_text']] - st.write(df2.head(5)) - - @st.cache - def convert_df(dataframe): - return dataframe.to_csv().encode('utf-8') - - csv = convert_df(df2) - st.download_button(label="Download CSV", data=csv, file_name='summarized_output.csv', mime='text/csv') - - - - - - -if __name__ == "__main__": - app() diff --git a/spaces/dbuscombe/SatelliteSuperResolution/README.md b/spaces/dbuscombe/SatelliteSuperResolution/README.md deleted file mode 100644 index
ffff09e081662ee5408d575cc59bac42b0a110f4..0000000000000000000000000000000000000000 --- a/spaces/dbuscombe/SatelliteSuperResolution/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SatelliteSuperResolution -emoji: 🏢 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/afmLib.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/afmLib.py deleted file mode 100644 index 394b901ff5eb149b40c0d9ae425c02d5ad0b5111..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/afmLib.py +++ /dev/null @@ -1,438 +0,0 @@ -"""Module for reading and writing AFM (Adobe Font Metrics) files. - -Note that this has been designed to read in AFM files generated by Fontographer -and has not been tested on many other files. In particular, it does not -implement the whole Adobe AFM specification [#f1]_, but it should read most -"common" AFM files. - -Here is an example of using `afmLib` to read, modify and write an AFM file: - - >>> from fontTools.afmLib import AFM - >>> f = AFM("Tests/afmLib/data/TestAFM.afm") - >>> - >>> # Accessing a pair gets you the kern value - >>> f[("V","A")] - -60 - >>> - >>> # Accessing a glyph name gets you metrics - >>> f["A"] - (65, 668, (8, -25, 660, 666)) - >>> # (charnum, width, bounding box) - >>> - >>> # Accessing an attribute gets you metadata - >>> f.FontName - 'TestFont-Regular' - >>> f.FamilyName - 'TestFont' - >>> f.Weight - 'Regular' - >>> f.XHeight - 500 - >>> f.Ascender - 750 - >>> - >>> # Attributes and items can also be set - >>> f[("A","V")] = -150 # Tighten kerning - >>> f.FontName = "TestFont Squished" - >>> - >>> # And the font written out again (remove the # in front) - >>> #f.write("testfont-squished.afm") - -.. rubric:: Footnotes - -.. [#f1] `Adobe Technote 5004 `_, - Adobe Font Metrics File Format Specification.
- -""" - - -import re - -# every single line starts with a "word" -identifierRE = re.compile(r"^([A-Za-z]+).*") - -# regular expression to parse char lines -charRE = re.compile( - r"(-?\d+)" # charnum - r"\s*;\s*WX\s+" # ; WX - r"(-?\d+)" # width - r"\s*;\s*N\s+" # ; N - r"([.A-Za-z0-9_]+)" # charname - r"\s*;\s*B\s+" # ; B - r"(-?\d+)" # left - r"\s+" - r"(-?\d+)" # bottom - r"\s+" - r"(-?\d+)" # right - r"\s+" - r"(-?\d+)" # top - r"\s*;\s*" # ; -) - -# regular expression to parse kerning lines -kernRE = re.compile( - r"([.A-Za-z0-9_]+)" # leftchar - r"\s+" - r"([.A-Za-z0-9_]+)" # rightchar - r"\s+" - r"(-?\d+)" # value - r"\s*" -) - -# regular expressions to parse composite info lines of the form: -# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; -compositeRE = re.compile( - r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*" # char name # number of parts -) -componentRE = re.compile( - r"PCC\s+" # PPC - r"([.A-Za-z0-9_]+)" # base char name - r"\s+" - r"(-?\d+)" # x offset - r"\s+" - r"(-?\d+)" # y offset - r"\s*;\s*" -) - -preferredAttributeOrder = [ - "FontName", - "FullName", - "FamilyName", - "Weight", - "ItalicAngle", - "IsFixedPitch", - "FontBBox", - "UnderlinePosition", - "UnderlineThickness", - "Version", - "Notice", - "EncodingScheme", - "CapHeight", - "XHeight", - "Ascender", - "Descender", -] - - -class error(Exception): - pass - - -class AFM(object): - - _attrs = None - - _keywords = [ - "StartFontMetrics", - "EndFontMetrics", - "StartCharMetrics", - "EndCharMetrics", - "StartKernData", - "StartKernPairs", - "EndKernPairs", - "EndKernData", - "StartComposites", - "EndComposites", - ] - - def __init__(self, path=None): - """AFM file reader. - - Instantiating an object with a path name will cause the file to be opened, - read, and parsed. Alternatively the path can be left unspecified, and a - file can be parsed later with the :meth:`read` method.""" - self._attrs = {} - self._chars = {} - self._kerning = {} - self._index = {} - self._comments = [] - self._composites = {} - if path is not None: - self.read(path) - - def read(self, path): - """Opens, reads and parses a file.""" - lines = readlines(path) - for line in lines: - if not line.strip(): - continue - m = identifierRE.match(line) - if m is None: - raise error("syntax error in AFM file: " + repr(line)) - - pos = m.regs[1][1] - word = line[:pos] - rest = line[pos:].strip() - if word in self._keywords: - continue - if word == "C": - self.parsechar(rest) - elif word == "KPX": - self.parsekernpair(rest) - elif word == "CC": - self.parsecomposite(rest) - else: - self.parseattr(word, rest) - - def parsechar(self, rest): - m = charRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - things = [] - for fr, to in m.regs[1:]: - things.append(rest[fr:to]) - charname = things[2] - del things[2] - charnum, width, l, b, r, t = (int(thing) for thing in things) - self._chars[charname] = charnum, width, (l, b, r, t) - - def parsekernpair(self, rest): - m = kernRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - things = [] - for fr, to in m.regs[1:]: - things.append(rest[fr:to]) - leftchar, rightchar, value = things - value = int(value) - self._kerning[(leftchar, rightchar)] = value - - def parseattr(self, word, rest): - if word == "FontBBox": - l, b, r, t = [int(thing) for thing in rest.split()] - self._attrs[word] = l, b, r, t - elif word == "Comment": - self._comments.append(rest) - else: - try: - value = int(rest) - except (ValueError, OverflowError): - 
self._attrs[word] = rest - else: - self._attrs[word] = value - - def parsecomposite(self, rest): - m = compositeRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - charname = m.group(1) - ncomponents = int(m.group(2)) - rest = rest[m.regs[0][1] :] - components = [] - while True: - m = componentRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - basechar = m.group(1) - xoffset = int(m.group(2)) - yoffset = int(m.group(3)) - components.append((basechar, xoffset, yoffset)) - rest = rest[m.regs[0][1] :] - if not rest: - break - assert len(components) == ncomponents - self._composites[charname] = components - - def write(self, path, sep="\r"): - """Writes out an AFM font to the given path.""" - import time - - lines = [ - "StartFontMetrics 2.0", - "Comment Generated by afmLib; at %s" - % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))), - ] - - # write comments, assuming (possibly wrongly!) they should - # all appear at the top - for comment in self._comments: - lines.append("Comment " + comment) - - # write attributes, first the ones we know about, in - # a preferred order - attrs = self._attrs - for attr in preferredAttributeOrder: - if attr in attrs: - value = attrs[attr] - if attr == "FontBBox": - value = "%s %s %s %s" % value - lines.append(attr + " " + str(value)) - # then write the attributes we don't know about, - # in alphabetical order - items = sorted(attrs.items()) - for attr, value in items: - if attr in preferredAttributeOrder: - continue - lines.append(attr + " " + str(value)) - - # write char metrics - lines.append("StartCharMetrics " + repr(len(self._chars))) - items = [ - (charnum, (charname, width, box)) - for charname, (charnum, width, box) in self._chars.items() - ] - - def myKey(a): - """Custom key function to make sure unencoded chars (-1) - end up at the end of the list after sorting.""" - if a[0] == -1: - a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number - return a - - items.sort(key=myKey) - - for charnum, (charname, width, (l, b, r, t)) in items: - lines.append( - "C %d ; WX %d ; N %s ; B %d %d %d %d ;" - % (charnum, width, charname, l, b, r, t) - ) - lines.append("EndCharMetrics") - - # write kerning info - lines.append("StartKernData") - lines.append("StartKernPairs " + repr(len(self._kerning))) - items = sorted(self._kerning.items()) - for (leftchar, rightchar), value in items: - lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) - lines.append("EndKernPairs") - lines.append("EndKernData") - - if self._composites: - composites = sorted(self._composites.items()) - lines.append("StartComposites %s" % len(self._composites)) - for charname, components in composites: - line = "CC %s %s ;" % (charname, len(components)) - for basechar, xoffset, yoffset in components: - line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) - lines.append(line) - lines.append("EndComposites") - - lines.append("EndFontMetrics") - - writelines(path, lines, sep) - - def has_kernpair(self, pair): - """Returns `True` if the given glyph pair (specified as a tuple) exists - in the kerning dictionary.""" - return pair in self._kerning - - def kernpairs(self): - """Returns a list of all kern pairs in the kerning dictionary.""" - return list(self._kerning.keys()) - - def has_char(self, char): - """Returns `True` if the given glyph exists in the font.""" - return char in self._chars - - def chars(self): - """Returns a list of all glyph names in the font.""" - return 
list(self._chars.keys()) - - def comments(self): - """Returns all comments from the file.""" - return self._comments - - def addComment(self, comment): - """Adds a new comment to the file.""" - self._comments.append(comment) - - def addComposite(self, glyphName, components): - """Specifies that the glyph `glyphName` is made up of the given components. - The components list should be of the following form:: - - [ - (glyphname, xOffset, yOffset), - ... - ] - - """ - self._composites[glyphName] = components - - def __getattr__(self, attr): - if attr in self._attrs: - return self._attrs[attr] - else: - raise AttributeError(attr) - - def __setattr__(self, attr, value): - # all attrs *not* starting with "_" are consider to be AFM keywords - if attr[:1] == "_": - self.__dict__[attr] = value - else: - self._attrs[attr] = value - - def __delattr__(self, attr): - # all attrs *not* starting with "_" are consider to be AFM keywords - if attr[:1] == "_": - try: - del self.__dict__[attr] - except KeyError: - raise AttributeError(attr) - else: - try: - del self._attrs[attr] - except KeyError: - raise AttributeError(attr) - - def __getitem__(self, key): - if isinstance(key, tuple): - # key is a tuple, return the kernpair - return self._kerning[key] - else: - # return the metrics instead - return self._chars[key] - - def __setitem__(self, key, value): - if isinstance(key, tuple): - # key is a tuple, set kernpair - self._kerning[key] = value - else: - # set char metrics - self._chars[key] = value - - def __delitem__(self, key): - if isinstance(key, tuple): - # key is a tuple, del kernpair - del self._kerning[key] - else: - # del char metrics - del self._chars[key] - - def __repr__(self): - if hasattr(self, "FullName"): - return "" % self.FullName - else: - return "" % id(self) - - -def readlines(path): - with open(path, "r", encoding="ascii") as f: - data = f.read() - return data.splitlines() - - -def writelines(path, lines, sep="\r"): - with open(path, "w", encoding="ascii", newline=sep) as f: - f.write("\n".join(lines) + "\n") - - -if __name__ == "__main__": - import EasyDialogs - - path = EasyDialogs.AskFileForOpen() - if path: - afm = AFM(path) - char = "A" - if afm.has_char(char): - print(afm[char]) # print charnum, width and boundingbox - pair = ("A", "V") - if afm.has_kernpair(pair): - print(afm[pair]) # print kerning value for pair - print(afm.Version) # various other afm entries have become attributes - print(afm.Weight) - # afm.comments() returns a list of all Comment lines found in the AFM - print(afm.comments()) - # print afm.chars() - # print afm.kernpairs() - print(afm) - afm.write(path + ".muck") diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py deleted file mode 100644 index 0bd69a386ec9f01c8951f0dfc8bc8c261718cf1f..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py +++ /dev/null @@ -1,251 +0,0 @@ -from fontTools.pens.basePen import BasePen -from functools import partial -from itertools import count -import sympy as sp -import sys - -n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic - -t, x, y = sp.symbols("t x y", real=True) -c = sp.symbols("c", real=False) # Complex representation instead of x/y - -X = tuple(sp.symbols("x:%d" % (n + 1), real=True)) -Y = tuple(sp.symbols("y:%d" % (n + 1), real=True)) -P = 
tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01"))) -C = tuple(sp.symbols("c:%d" % (n + 1), real=False)) - -# Cubic Bernstein basis functions -BinomialCoefficient = [(1, 0)] -for i in range(1, n + 1): - last = BinomialCoefficient[-1] - this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,) - BinomialCoefficient.append(this) -BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) -del last, this - -BernsteinPolynomial = tuple( - tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs)) - for n, coeffs in enumerate(BinomialCoefficient) -) - -BezierCurve = tuple( - tuple( - sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins)) - for j in range(2) - ) - for n, bernsteins in enumerate(BernsteinPolynomial) -) -BezierCurveC = tuple( - sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins)) - for n, bernsteins in enumerate(BernsteinPolynomial) -) - - -def green(f, curveXY): - f = -sp.integrate(sp.sympify(f), y) - f = f.subs({x: curveXY[0], y: curveXY[1]}) - f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) - return f - - -class _BezierFuncsLazy(dict): - def __init__(self, symfunc): - self._symfunc = symfunc - self._bezfuncs = {} - - def __missing__(self, i): - args = ["p%d" % d for d in range(i + 1)] - f = green(self._symfunc, BezierCurve[i]) - f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize - return sp.lambdify(args, f) - - -class GreenPen(BasePen): - - _BezierFuncs = {} - - @classmethod - def _getGreenBezierFuncs(celf, func): - funcstr = str(func) - if not funcstr in celf._BezierFuncs: - celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) - return celf._BezierFuncs[funcstr] - - def __init__(self, func, glyphset=None): - BasePen.__init__(self, glyphset) - self._funcs = self._getGreenBezierFuncs(func) - self.value = 0 - - def _moveTo(self, p0): - self.__startPoint = p0 - - def _closePath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - self._lineTo(self.__startPoint) - - def _endPath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - # Green theorem is not defined on open contours. - raise NotImplementedError - - def _lineTo(self, p1): - p0 = self._getCurrentPoint() - self.value += self._funcs[1](p0, p1) - - def _qCurveToOne(self, p1, p2): - p0 = self._getCurrentPoint() - self.value += self._funcs[2](p0, p1, p2) - - def _curveToOne(self, p1, p2, p3): - p0 = self._getCurrentPoint() - self.value += self._funcs[3](p0, p1, p2, p3) - - -# Sample pens. -# Do not use this in real code. -# Use fontTools.pens.momentsPen.MomentsPen instead. 
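-# For orientation, a small worked example (a sketch assuming only the sympy
-# import above): for func=1, green() turns a straight segment P0 -> P1 into
-# the trapezoid term -(x1 - x0)*(y0 + y1)/2; summed around a closed contour,
-# these terms give the signed area that the AreaPen defined just below accumulates.
-#
-#   import sympy as sp
-#   t, x0, y0, x1, y1 = sp.symbols("t x0 y0 x1 y1", real=True)
-#   segX = x0 + (x1 - x0) * t     # degree-1 Bezier: a line segment
-#   segY = y0 + (y1 - y0) * t
-#   f = -segY                     # -integrate(1, y), evaluated on the curve
-#   area = sp.integrate(f * sp.diff(segX, t), (t, 0, 1))
-#   print(sp.factor(area))       # -> -(x1 - x0)*(y0 + y1)/2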
-AreaPen = partial(GreenPen, func=1) -MomentXPen = partial(GreenPen, func=x) -MomentYPen = partial(GreenPen, func=y) -MomentXXPen = partial(GreenPen, func=x * x) -MomentYYPen = partial(GreenPen, func=y * y) -MomentXYPen = partial(GreenPen, func=x * y) - - -def printGreenPen(penName, funcs, file=sys.stdout, docstring=None): - - if docstring is not None: - print('"""%s"""' % docstring) - - print( - """from fontTools.pens.basePen import BasePen, OpenContourError -try: - import cython - - COMPILED = cython.compiled -except (AttributeError, ImportError): - # if cython not installed, use mock module with no-op decorators and types - from fontTools.misc import cython - - COMPILED = False - - -__all__ = ["%s"] - -class %s(BasePen): - - def __init__(self, glyphset=None): - BasePen.__init__(self, glyphset) -""" - % (penName, penName), - file=file, - ) - for name, f in funcs: - print(" self.%s = 0" % name, file=file) - print( - """ - def _moveTo(self, p0): - self.__startPoint = p0 - - def _closePath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - self._lineTo(self.__startPoint) - - def _endPath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - # Green theorem is not defined on open contours. - raise OpenContourError( - "Green theorem is not defined on open contours." - ) -""", - end="", - file=file, - ) - - for n in (1, 2, 3): - - subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)} - greens = [green(f, BezierCurve[n]) for name, f in funcs] - greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize - greens = [f.subs(subs) for f in greens] # Convert to p to x/y - defs, exprs = sp.cse( - greens, - optimizations="basic", - symbols=(sp.Symbol("r%d" % i) for i in count()), - ) - - print() - for name, value in defs: - print(" @cython.locals(%s=cython.double)" % name, file=file) - if n == 1: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - def _lineTo(self, p1): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 -""", - file=file, - ) - elif n == 2: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - @cython.locals(x2=cython.double, y2=cython.double) - def _qCurveToOne(self, p1, p2): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 - x2,y2 = p2 -""", - file=file, - ) - elif n == 3: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - @cython.locals(x2=cython.double, y2=cython.double) - @cython.locals(x3=cython.double, y3=cython.double) - def _curveToOne(self, p1, p2, p3): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 - x2,y2 = p2 - x3,y3 = p3 -""", - file=file, - ) - for name, value in defs: - print(" %s = %s" % (name, value), file=file) - - print(file=file) - for name, value in zip([f[0] for f in funcs], exprs): - print(" self.%s += %s" % (name, value), file=file) - - print( - """ -if __name__ == '__main__': - from fontTools.misc.symfont import x, y, printGreenPen - printGreenPen('%s', [""" - % penName, - file=file, - ) - for name, f in funcs: - print(" ('%s', %s)," % (name, str(f)), file=file) - print(" ])", file=file) - - -if __name__ == "__main__": - pen = AreaPen() - pen.moveTo((100, 100)) - pen.lineTo((100, 200)) - pen.lineTo((200, 200)) - pen.curveTo((200, 250), (300, 300), (250, 350)) - pen.lineTo((200, 100)) - pen.closePath() - print(pen.value) diff --git a/spaces/ddiddi/bhasha.dev/app.py 
b/spaces/ddiddi/bhasha.dev/app.py deleted file mode 100644 index c9b1f6e0fea945d6540de2fbdbf392485586ee50..0000000000000000000000000000000000000000 --- a/spaces/ddiddi/bhasha.dev/app.py +++ /dev/null @@ -1,319 +0,0 @@ -import gradio as gr -from datetime import datetime -from PIL import Image -import flag -import os - -from libretranslatepy import LibreTranslateAPI -lt = LibreTranslateAPI("https://translate.argosopentech.com/") - - -stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion") -### ———————————————————————————————————————— - -title="Any Text to Stable Diffusion" - - -def get_translation(text): - lang_detected = lt.detect(text)[0]['language'] - print(lang_detected) - english_translated = lt.translate(text, lang_detected, "en") - print(english_translated) - return english_translated - -def get_images(prompt): - prompt_t = get_translation(prompt) - gallery_dir = stable_diffusion(prompt_t, fn_index=2) - return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)] - -css = """ - .container { - max-width: 880px; - margin: auto; - padding-top: 1.5rem; - } - a { - text-decoration: underline; - } - h1 { - font-weight: 900; - margin-bottom: 7px; - text-align: center; - font-size: 2em; - margin-bottom: 1em; - } - #w2sd_container{ - margin-top: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .tabitem { - border-bottom-left-radius: 10px; - border-bottom-right-radius: 10px; - } - #record_tab, #upload_tab { - font-size: 1.2em; - } - #record_btn{ - - } - #record_btn > div > button > span { - width: 2.375rem; - height: 2.375rem; - } - #record_btn > div > button > span > span { - width: 2.375rem; - height: 2.375rem; - } - audio { - margin-bottom: 10px; - } - div#record_btn > .mt-6{ - margin-top: 0!important; - } - div#record_btn > .mt-6 button { - font-size: 2em; - width: 100%; - padding: 20px; - height: 160px; - } - div#upload_area { - height: 11.1rem; - } - div#upload_area > div.w-full > div { - min-height: 9rem; - } - #check_btn_1, #check_btn_2{ - color: #fff; - --tw-gradient-from: #4caf50; - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #4caf50; - border-color: #8bc34a; - } - #magic_btn_1, #magic_btn_2{ - color: #fff; - --tw-gradient-from: #f44336; - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #ff9800; - border-color: #ff9800; - } - input::-webkit-inner-spin-button, input::-webkit-outer-spin-button { - -webkit-appearance: none; - } - input[type=number] { - -moz-appearance: textfield; - } - input[type=range] { - -webkit-appearance: none; - cursor: pointer; - height: 1px; - background: currentColor; - } - input[type=range]::-webkit-slider-thumb { - -webkit-appearance: none; - width: 0.5em; - height: 1.2em; - border-radius: 10px; - background: currentColor; - } - input[type=range]::-moz-range-thumb{ - width: 0.5em; - height: 1.2em; - border-radius: 10px; - background: currentColor; - } - div#spoken_lang textarea { - font-size: 4em; - line-height: 1em; - text-align: center; - } - div#transcripted { - flex: 4; - } - div#translated textarea { - font-size: 1.5em; - line-height: 1.25em; - } - #sd_settings { - margin-bottom: 20px; - } - #diffuse_btn { - color: #fff; - font-size: 
1em; - margin-bottom: 20px; - --tw-gradient-from: #4caf50; - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #4caf50; - border-color: #8bc34a; - } - #translate_btn { - color: #fff; - font-size: 1em; - margin-bottom: 20px; - --tw-gradient-from: #4caf50; - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #4caf50; - border-color: #8bc34a; - } - #notice { - padding: 20px 14px 10px; - display: flex; - align-content: space-evenly; - gap: 20px; - line-height: 1em; - font-size: .8em; - border: 1px solid #374151; - border-radius: 10px; - } - #about { - padding: 20px; - } - #notice > div { - flex: 1; - } - -""" - -### ———————————————————————————————————————— - -with gr.Blocks(css=css) as demo: - with gr.Column(): - gr.HTML(''' -

<div style="text-align: center;">
-    <h1>Any Text to Stable Diffusion</h1>
-    <p>Ask Stable Diffusion in any language!</p>
-    <p>This demo is connected to StableDiffusion Space • Offered by ddiddi</p>
-</div>
      - - ''') - - - - with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False): - with gr.Row(): - guidance_scale = gr.Slider(2, 15, value = 7, label = 'Guidance Scale') - nb_iterations = gr.Slider(10, 50, value = 25, step = 1, label = 'Steps') - seed = gr.Slider(label = "Seed", minimum = 0, maximum = 2147483647, step = 1, randomize = True) - - gr.Markdown( - """ - ## 1. Enter prompt - Here are some examples: \n - Доброе утро \t - おはようございます \t - शुभ प्रभात \t - Good morning - """ - - - ) - - with gr.Row(): - - enter_prompt = gr.Textbox( - label="Enter prompt", - lines=3, - elem_id="transcript" - ) - - with gr.Column(): - translated_output = gr.Textbox( - label="in English", - lines=3, - elem_id="translated" - ) - with gr.Row(): - clear_btn = gr.Button(value="Clear") - translate_btn = gr.Button(value="Translate", elem_id="translate_btn") - diffuse_btn = gr.Button(value="Translate >> SD!", elem_id="diffuse_btn") - - clear_btn.click(fn=lambda value: gr.update(value=""), inputs=clear_btn, outputs=translated_output) - - - -# with gr.Column(): - - - - gr.Markdown(""" - ## 3. Stable Diffusion Results - Inference time is about ~30-40 seconds - """ - ) - - sd_output = gr.Gallery().style(grid=2, height="auto") - - - gr.Markdown(""" - ### 📌 Resources - -

Stable Diffusion is a state-of-the-art text-to-image model that generates images from text.
-
-LICENSE
-
-The model is licensed with a CreativeML Open RAIL-M license.
-The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license.
-The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information that would be meant for harm, spreads misinformation, or targets vulnerable groups.
-For the full list of restrictions please read the license.
-
-Biases and content acknowledgment
-
-Despite how impressive being able to turn text into image is, beware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence.
-The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes.
-You can read more in the model card.
      - - """, elem_id="about") - - - diffuse_btn.click(get_images, - inputs = [ - enter_prompt - ], - outputs = sd_output - ) - - translate_btn.click(get_translation, - inputs = [ - enter_prompt - ], - outputs = translated_output - ) - - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/ddiddi/bhasha.dev/translate.py b/spaces/ddiddi/bhasha.dev/translate.py deleted file mode 100644 index 3106d76302ca152c014c0d0a217fb04fe87afb5f..0000000000000000000000000000000000000000 --- a/spaces/ddiddi/bhasha.dev/translate.py +++ /dev/null @@ -1,10 +0,0 @@ -from libretranslatepy import LibreTranslateAPI - -lt = LibreTranslateAPI("https://translate.argosopentech.com/") - -attempt_input = input("Enter input:") -detected = lt.detect(attempt_input)[0]['language'] -print(detected) -print(lt.translate(attempt_input, detected, "en")) - - diff --git a/spaces/declare-lab/tango/audioldm/variational_autoencoder/autoencoder.py b/spaces/declare-lab/tango/audioldm/variational_autoencoder/autoencoder.py deleted file mode 100644 index 9dadc849da65d1f9eb82dc75dc777250bf738151..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/audioldm/variational_autoencoder/autoencoder.py +++ /dev/null @@ -1,135 +0,0 @@ -import torch -from audioldm.latent_diffusion.ema import * -from audioldm.variational_autoencoder.modules import Encoder, Decoder -from audioldm.variational_autoencoder.distributions import DiagonalGaussianDistribution - -from audioldm.hifigan.utilities import get_vocoder, vocoder_infer - - -class AutoencoderKL(nn.Module): - def __init__( - self, - ddconfig=None, - lossconfig=None, - image_key="fbank", - embed_dim=None, - time_shuffle=1, - subband=1, - ckpt_path=None, - reload_from_ckpt=None, - ignore_keys=[], - colorize_nlabels=None, - monitor=None, - base_learning_rate=1e-5, - scale_factor=1 - ): - super().__init__() - - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - - self.subband = int(subband) - - if self.subband > 1: - print("Use subband decomposition %s" % self.subband) - - self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - - self.vocoder = get_vocoder(None, "cpu") - self.embed_dim = embed_dim - - if monitor is not None: - self.monitor = monitor - - self.time_shuffle = time_shuffle - self.reload_from_ckpt = reload_from_ckpt - self.reloaded = False - self.mean, self.std = None, None - - self.scale_factor = scale_factor - - def encode(self, x): - # x = self.time_shuffle_operation(x) - x = self.freq_split_subband(x) - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - dec = self.freq_merge_subband(dec) - return dec - - def decode_to_waveform(self, dec): - dec = dec.squeeze(1).permute(0, 2, 1) - wav_reconstruction = vocoder_infer(dec, self.vocoder) - return wav_reconstruction - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - - if self.flag_first_run: - print("Latent size: ", z.size()) - self.flag_first_run = False - - dec = self.decode(z) - - return dec, posterior - - def freq_split_subband(self, fbank): - if self.subband == 1 or self.image_key != "stft": - return fbank - - bs, ch, tstep, fbins = fbank.size() - - assert fbank.size(-1) % self.subband == 0 - assert ch == 1 - - return ( 
- fbank.squeeze(1) - .reshape(bs, tstep, self.subband, fbins // self.subband) - .permute(0, 2, 1, 3) - ) - - def freq_merge_subband(self, subband_fbank): - if self.subband == 1 or self.image_key != "stft": - return subband_fbank - assert subband_fbank.size(1) == self.subband # Channel dimension - bs, sub_ch, tstep, fbins = subband_fbank.size() - return subband_fbank.permute(0, 2, 1, 3).reshape(bs, tstep, -1).unsqueeze(1) - - def device(self): - return next(self.parameters()).device - - @torch.no_grad() - def encode_first_stage(self, x): - return self.encode(x) - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, "b h w c -> b c h w").contiguous() - - z = 1.0 / self.scale_factor * z - return self.decode(z) - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError( - f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" - ) - return self.scale_factor * z \ No newline at end of file diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py deleted file mode 100644 index 9396329434059db279d7b276af0301905fbc49cc..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ /dev/null @@ -1,299 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
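-# Orientation note for the audioldm AutoencoderKL in the hunk above: latents
-# handed to the diffusion model are multiplied by scale_factor right after
-# encoding and divided by it again before decoding. A minimal sketch of that
-# round trip (assuming `vae` is a constructed AutoencoderKL; the 4-D fbank
-# shape here is an illustrative assumption, not a documented API):
-#
-#   import torch
-#   fbank = torch.randn(2, 1, 1024, 64)          # (batch, 1, time, mel_bins), made up
-#   posterior = vae.encode(fbank)                # DiagonalGaussianDistribution
-#   z = vae.get_first_stage_encoding(posterior)  # .sample(), then * scale_factor
-#   recon = vae.decode_first_stage(z)            # * 1/scale_factor, then decode()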
- -import gc -import random -import unittest - -import numpy as np -import torch -from transformers import XLMRobertaTokenizer - -from diffusers import ( - AltDiffusionImg2ImgPipeline, - AutoencoderKL, - PNDMScheduler, - UNet2DConditionModel, -) -from diffusers.image_processor import VaeImageProcessor -from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( - RobertaSeriesConfig, - RobertaSeriesModelWithTransformation, -) -from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import require_torch_gpu - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vae(self): - torch.manual_seed(0) - model = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - return model - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = RobertaSeriesConfig( - hidden_size=32, - project_dim=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=5006, - ) - return RobertaSeriesModelWithTransformation(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_img2img_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False) - alt_pipe = alt_pipe.to(device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - 
image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([0.4115, 0.3870, 0.4089, 0.4807, 0.4668, 0.4144, 0.4151, 0.4721, 0.4569]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - init_image = self.dummy_image.to(torch_device) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - image = alt_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images - - assert image.shape == (1, 32, 32, 3) - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_pipeline_multiple_of_8(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/img2img/sketch-mountains-input.jpg" - ) - # resize to resolution that is divisible by 8 but not 16 or 32 - init_image = init_image.resize((760, 504)) - - model_id = "BAAI/AltDiffusion" - pipe = AltDiffusionImg2ImgPipeline.from_pretrained( - model_id, - safety_checker=None, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - strength=0.75, - guidance_scale=7.5, - generator=generator, - output_type="np", - ) - image = output.images[0] - - image_slice = image[255:258, 383:386, -1] - - assert image.shape == (504, 760, 3) - expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - -@slow -@require_torch_gpu -class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_stable_diffusion_img2img_pipeline_default(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/img2img/sketch-mountains-input.jpg" - ) - init_image = init_image.resize((768, 512)) - expected_image = load_numpy( - 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" - ) - - model_id = "BAAI/AltDiffusion" - pipe = AltDiffusionImg2ImgPipeline.from_pretrained( - model_id, - safety_checker=None, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - strength=0.75, - guidance_scale=7.5, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (512, 768, 3) - # img2img is flaky across GPUs even in fp32, so using MAE here - assert np.abs(expected_image - image).max() < 1e-3 diff --git a/spaces/deepkyu/multilingual-font-style-transfer/utils/__init__.py b/spaces/deepkyu/multilingual-font-style-transfer/utils/__init__.py deleted file mode 100644 index 92d46ca56c78b5cfff3b71f2298d5573244207dc..0000000000000000000000000000000000000000 --- a/spaces/deepkyu/multilingual-font-style-transfer/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .logger import * -from .tb import * -from .util import * \ No newline at end of file diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/memory/__init__.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/memory/__init__.py deleted file mode 100644 index 2bcf8efd09712339308e72659e84450d3fa829fd..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/tests/metagpt/memory/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Desc : diff --git a/spaces/deprem-ml/deprem-ocr-test/README.md b/spaces/deprem-ml/deprem-ocr-test/README.md deleted file mode 100644 index e40b6c77b6a00873820796cfb7d2be7106ca0d49..0000000000000000000000000000000000000000 --- a/spaces/deprem-ml/deprem-ocr-test/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Deprem OCR TEST -emoji: 🧪 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: true -duplicated_from: deprem-ml/deprem-ocr ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Character Generator Crack Code.md b/spaces/diacanFperku/AutoGPT/Character Generator Crack Code.md deleted file mode 100644 index f2f0c794e30d2dca7a1a539539e21078d0382eaf..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Character Generator Crack Code.md +++ /dev/null @@ -1,6 +0,0 @@ -

# Character Generator crack code
-
-Download Zip ––– https://gohhs.com/2uFUr0
-
-Download File Reallusion Character Creator 0927 Pipeline x64 Crack Only ... C Free source code and tutorials for Software developers and Architects. 1. 4d29de3e1b
      diff --git a/spaces/diacanFperku/AutoGPT/Cisco Iou NEW! Keygen.Py.md b/spaces/diacanFperku/AutoGPT/Cisco Iou NEW! Keygen.Py.md deleted file mode 100644 index 8116686e8a14b2579e7d94ddb20c9368001e0d15..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Cisco Iou NEW! Keygen.Py.md +++ /dev/null @@ -1,10 +0,0 @@ -

# Cisco Iou Keygen.Py
-
-Download ✔✔✔ https://gohhs.com/2uFUvi
-
-Here is a link to download the Cisco license key: ... Also on the site, there are links to download keys for other products: ...
-Download key for Cisco Unified Communications Manager 6.3.0.2 (Cisco Unified Communications Manager
-Jan 20 2014 · I had a problem activating Cisco Unified Communications Manager, I wrote to Cisco tech support and ... Download cisco unified communications manager activation code.
-Views: 463, Comments: 0. Rating: +13
-2 Apr. 2015 ... Cisco Unified Communications Manager, Free Unified ... 8a78ff9644
      diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/launcher.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/launcher.py deleted file mode 100644 index f55b2892a78a0714888e79412de36919eda93071..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/launcher.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import time -import torch -import random - -import torch.multiprocessing as mp -import numpy as np - -try: - mp.set_start_method('spawn', force=True) -except RuntimeError: - pass - -import colbert.utils.distributed as distributed - -from colbert.infra.run import Run -from colbert.infra.config import BaseConfig, RunConfig, RunSettings - -from colbert.utils.utils import print_message - - -class Launcher: - def __init__(self, callee, run_config=None, return_all=False): - self.callee = callee - self.return_all = return_all - - self.run_config = RunConfig.from_existing(Run().config, run_config) - self.nranks = self.run_config.nranks - - def launch(self, custom_config, *args): - return_value_queue = mp.Queue() - - rng = random.Random(time.time()) - port = str(12355 + rng.randint(0, 1000)) # randomize the port to avoid collision on launching several jobs. - - all_procs = [] - for new_rank in range(0, self.nranks): - assert isinstance(custom_config, BaseConfig) - assert isinstance(custom_config, RunSettings) - - new_config = type(custom_config).from_existing(custom_config, self.run_config, RunConfig(rank=new_rank)) - - args_ = (self.callee, port, return_value_queue, new_config, *args) - all_procs.append(mp.Process(target=setup_new_process, args=args_)) - - # Clear GPU space (e.g., after a `Searcher` on GPU-0 is deleted) - # TODO: Generalize this from GPU-0 only! - # TODO: Move this to a function. And call that function from __del__ in a class that's inherited by Searcher, Indexer, etc. - - # t = torch.cuda.get_device_properties(0).total_memory - # r = torch.cuda.memory_reserved(0) - # a = torch.cuda.memory_allocated(0) - # f = r-a - - # print_message(f"[Pre-Emptying] GPU memory check: r={r}, a={a}, f={f}") - - torch.cuda.empty_cache() - - # t = torch.cuda.get_device_properties(0).total_memory - # r = torch.cuda.memory_reserved(0) - # a = torch.cuda.memory_allocated(0) - # f = r-a - - # print_message(f"[Post-Emptying] GPU memory check: r={r}, a={a}, f={f}") - - print_memory_stats('MAIN') - - for proc in all_procs: - print("#> Starting...") - proc.start() - - print_memory_stats('MAIN') - - # TODO: If the processes crash upon join, raise an exception and don't block on .get() below! - - return_values = sorted([return_value_queue.get() for _ in all_procs]) - return_values = [val for rank, val in return_values] - - if not self.return_all: - return_values = return_values[0] - - for proc in all_procs: - proc.join() - print("#> Joined...") - - print_memory_stats('MAIN') - - return return_values - - -def setup_new_process(callee, port, return_value_queue, config, *args): - print_memory_stats() - - random.seed(12345) - np.random.seed(12345) - torch.manual_seed(12345) - torch.cuda.manual_seed_all(12345) - - rank, nranks = config.rank, config.nranks - - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = port - os.environ["WORLD_SIZE"] = str(config.nranks) - os.environ["RANK"] = str(config.rank) - - # TODO: Ideally the gpus "getter" handles this max-nranks thing! 
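- # expose only the first nranks entries of config.gpus_ to this worker process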
- os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, config.gpus_[:nranks])) - - nranks_, distributed_ = distributed.init(rank) - assert nranks_ == nranks - - # Run.init(args.rank, args.root, args.experiment, args.run) - - with Run().context(config, inherit_config=False): - return_val = callee(config, *args) - - return_value_queue.put((rank, return_val)) - - -def print_memory_stats(message=''): - return # FIXME: Add this back before release. - - import psutil # Remove before releases? Or at least make optional with try/except. - - global_info = psutil.virtual_memory() - total, available, used, free = global_info.total, global_info.available, global_info.used, global_info.free - - info = psutil.Process().memory_info() - rss, vms, shared = info.rss, info.vms, info.shared - uss = psutil.Process().memory_full_info().uss - - gib = 1024 ** 3 - - summary = f""" - "[PID: {os.getpid()}] - [{message}] - Available: {available / gib:,.1f} / {total / gib:,.1f} - Free: {free / gib:,.1f} / {total / gib:,.1f} - Usage: {used / gib:,.1f} / {total / gib:,.1f} - - RSS: {rss / gib:,.1f} - VMS: {vms / gib:,.1f} - USS: {uss / gib:,.1f} - SHARED: {shared / gib:,.1f} - """.strip().replace('\n', '\t') - - print_message(summary, pad=True) diff --git a/spaces/digitalxingtong/Luzao-Bert-Vits2/text/tone_sandhi.py b/spaces/digitalxingtong/Luzao-Bert-Vits2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Luzao-Bert-Vits2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/mel_processing.py b/spaces/digitalxingtong/Nailv-Bert-Vits2/mel_processing.py deleted file mode 100644 index 50435ecf88ef4fb6c1d47f3e6edd04c3ea7d3e80..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + 
dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/commons.py b/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transforms.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def 
rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - 
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/dragonSwing/annotate-anything/README.md b/spaces/dragonSwing/annotate-anything/README.md deleted file mode 100644 index 10eb4ca00f0267ab08bd0fdf9d82921552ebdbf5..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/annotate-anything/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Annotate Anything -emoji: 🐨 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dylanebert/list-of-splats/README.md b/spaces/dylanebert/list-of-splats/README.md deleted file mode 100644 index 9419882eaab71abf595034997927130d179de4f2..0000000000000000000000000000000000000000 --- a/spaces/dylanebert/list-of-splats/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: List Of Splats -emoji: 🌖 -colorFrom: gray -colorTo: green -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/erbanku/gpt-academic/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp b/spaces/erbanku/gpt-academic/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp deleted file mode 100644 index c94575903bdf2eef71ecbe66382375552446e510..0000000000000000000000000000000000000000 --- a/spaces/erbanku/gpt-academic/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include "libipc/pool_alloc.h" - -#include "libipc/memory/resource.h" - -namespace ipc { -namespace mem { - -void* pool_alloc::alloc(std::size_t size) { - return async_pool_alloc::alloc(size); -} - -void pool_alloc::free(void* p, std::size_t size) { - async_pool_alloc::free(p, size); -} - -} // namespace mem -} // namespace ipc diff --git a/spaces/falterWliame/Face_Mask_Detection/Alfatest Grp 1.27 26 _HOT_.md b/spaces/falterWliame/Face_Mask_Detection/Alfatest Grp 1.27 26 _HOT_.md deleted file mode 100644 index 2c463ef9b109c3dffd6e8047e11004810407dd01..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Alfatest Grp 1.27 26 _HOT_.md +++ /dev/null @@ -1,25 +0,0 @@ -
      -

      What is Alfatest Grp 1.27 26 and how to use it?

      -

      Alfatest Grp 1.27 26 is a software program that helps you diagnose and repair various automotive problems. It is developed by Alfatest Ind. e Com. Prods. Eletrônicos S/A, a Brazilian company that specializes in electronic products for the automotive industry.

      -

      Alfatest Grp 1.27 26


      Downloadhttps://urlca.com/2uDdN7



      -

      Alfatest Grp 1.27 26 is part of the Guia de Reparos Plus (Repair Guide Plus) series, which provides detailed information on how to fix common issues with different car models and systems. You can access the software by downloading it from the Alfatest website or by using a CD-ROM.

      -

      To use Alfatest Grp 1.27 26, you need to install it on your computer and connect it to a compatible diagnostic device, such as a scanner or an oscilloscope. The software will then guide you through the steps of identifying and solving the problem, using clear instructions, diagrams, and videos.

      -

      Some of the features of Alfatest Grp 1.27 26 are:

      -

      -
        -
      • It covers a wide range of vehicles, from cars to trucks, motorcycles, and buses.
      • -
      • It supports various languages, such as Portuguese, Spanish, English, and French.
      • -
      • It updates automatically with new information and tips.
      • -
      • It allows you to print or save reports of your repairs.
      • -
      -

      Alfatest Grp 1.27 26 is a useful tool for anyone who works with automotive maintenance or repair. It can help you save time and money by providing you with accurate and reliable solutions.

      - -

      If you want to learn more about Alfatest Grp 1.27 26, you can visit the Alfatest website, where you can find manuals, videos, and FAQs. You can also contact the Alfatest support team by phone or email if you have any questions or issues with the software.

      -

      Alfatest Grp 1.27 26 is not the only product that Alfatest offers. The company also has other software programs and devices that can help you with different aspects of automotive diagnostics and repair. For example, you can use Alfatest Kaptor to scan and analyze the electronic systems of your vehicle, or Alfatest Scope to measure and display electrical signals.

      -

      Alfatest is a leader in the field of automotive electronics in Brazil and Latin America. The company has been in business since 1986 and has a reputation for quality and innovation. Alfatest products are used by thousands of professionals and enthusiasts around the world.

      - -

      If you are interested in buying Alfatest Grp 1.27 26 or any other Alfatest product, you can visit the Alfatest online store, where you can find the prices and specifications of each item. You can also find a list of authorized dealers and distributors in your area.

      -

      Alfatest Grp 1.27 26 is compatible with Windows XP, Vista, 7, 8, and 10. It requires a minimum of 512 MB of RAM and 2 GB of free disk space. It also requires an internet connection for updates and activation.

      -

      Alfatest Grp 1.27 26 is a powerful and versatile software program that can help you diagnose and repair automotive problems with ease and confidence. It is a must-have for anyone who works with cars or loves them.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Codevisionavr V3.md b/spaces/falterWliame/Face_Mask_Detection/Codevisionavr V3.md deleted file mode 100644 index 34495790da4584f6e7cd71a5f5e075c15ddf0df0..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Codevisionavr V3.md +++ /dev/null @@ -1,15 +0,0 @@ -

      Codevisionavr V3


      DOWNLOAD ★★★ https://urlca.com/2uDdWp



- -CodeVisionAVR C compiler for Atmel AVR microcontrollers. CodeVisionAVR V3, in addition to its own IDE, can now be used as an extension, fully integrated ... Download CodeVisionAVR ... -- Download CodeVisionAVR with key -9 Jul 2013 - Download CodeVisionAVR. -For beginner developers who want to learn how to program ICs, as well as for those who already work with them. -CodeVisionAVR - Download CodeVisionAVR 2.0, CodeVisionAVR - program ... -CodeVisionAVR allows you to program microcontrollers of the family ... -Download, 4.44 MB. -CodeVisionAVRTools is a set of utilities for working with ... -13 Feb 2020 - Download CodeVisionAVR 1.9.8. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/DevWing LE Keygen ((EXCLUSIVE)).rar .rar.md b/spaces/falterWliame/Face_Mask_Detection/DevWing LE Keygen ((EXCLUSIVE)).rar .rar.md deleted file mode 100644 index 1723db3ace6226fcea99011856df37feb604d2b0..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/DevWing LE Keygen ((EXCLUSIVE)).rar .rar.md +++ /dev/null @@ -1,10 +0,0 @@ -

      DevWing LE keygen.rar .rar


      DOWNLOAD ····· https://urlca.com/2uDcOJ



      - -DevWing LE Keygen.rar .rar . FabFilter Total Bundle v2018.14.07 WiN crack. HTML5 exporter to download Clickteam Fusion 2.5 hack activation code. Download crack for clickteam fusion 2.5 - Crack for clickteam fusion 2.5, crack for microsoft office 2010. -Download Crack for Clickteam Fusion 2.5 Download Crack for clickteam fusion 2.5 - Crack for clickteam fusion 2.5, crack for microsoft office 2010. .clickteam fusion 2.5, crack for microsoft office 2010 .Clickteam Fusion 2.5, Crack for Clickteam Fusion 2.5 crack for Fuse 3.0 Crack does not have. -Fuse 3.0 download crack. -Clickteam fusion 2.5 crack. -Clickteam fusion 2.5 8a78ff9644
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Interstellar 1080p Br Rip Movies Torrents.md b/spaces/falterWliame/Face_Mask_Detection/Interstellar 1080p Br Rip Movies Torrents.md deleted file mode 100644 index d8930fdab3e60feec38a147ba0ef057ca10e5529..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Interstellar 1080p Br Rip Movies Torrents.md +++ /dev/null @@ -1,6 +0,0 @@ -

      interstellar 1080p br rip movies torrents


      Download Ziphttps://urlca.com/2uDcHB



      -
      -Interstellar Full Movie, Download Interstellar YTS & YIFY Torrent. ... Server P2P. BluRay. Download Torrent. 720p. BluRay. File size. 1.02 GB ... space travel and conquer the vast distances involved in an interstellar voyage. ... Quality: HD. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Manusmriti In Telugu Pdf Free Download VERIFIED.md b/spaces/falterWliame/Face_Mask_Detection/Manusmriti In Telugu Pdf Free Download VERIFIED.md deleted file mode 100644 index ef5471d7130c13b75e04d7214eef7f40000ee82d..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Manusmriti In Telugu Pdf Free Download VERIFIED.md +++ /dev/null @@ -1,8 +0,0 @@ - -

The Manusmriti came to be known as the Manava Dharmashastra in the twelfth century. It is a samhita (bhaskara) text that has been variously dated to the fifth, sixth, seventh and eighth centuries. Its authorship, however, has been debated. The text was likely composed over a long period: it uses the same ancient Sanskrit verse throughout, but the changes reflect local religious practices in different regions of India. The original text is divided into three parts, including the shastra (rules), the astra (chapters) and the shataka (stories). The Manusmriti is divided into 69 chapters, although the names of some chapters vary in different versions.

      -

If you are interested in the Manusmriti, which means “the laws of Manu”, then you will find this book very useful. There is a brief introduction, which makes it very clear and understandable that the book is a compilation of old Vedic teachings.

      -

      manusmriti in telugu pdf free download


      Download Zip ->>> https://urlca.com/2uDdG1



      -

The Manusmriti was composed of four works, by four authors. The first author was Manu; the second is called Vyasa. Manu is the author of three works; the third is Kautilya, the fourth is Yajnavalkya. The Manusmriti was composed in Sanskrit.

      -

The Manusmriti is a textbook for people who want to perform Vedic rituals. It teaches us about our ancestors and ourselves, and how to get on in life. It is like the Rambam in Judaism, the Bhagavad Gita in Hinduism and the Koran in Islam. It has kept the legacy of our forefathers alive for many years.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download DirectX End-User Runtime Web Installer for Windows.md b/spaces/fatiXbelha/sd/Download DirectX End-User Runtime Web Installer for Windows.md deleted file mode 100644 index 0a4b3ac43a0d917b7497a4c1ab5f61035e2dfd88..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download DirectX End-User Runtime Web Installer for Windows.md +++ /dev/null @@ -1,122 +0,0 @@ - -

      What is Download 35 and Why You Need It

      -

If you are a Windows user, you may have encountered the term "download 35" while browsing the web or trying to run some games or applications. But what is download 35 exactly, and why do you need it? In this article, we will explain what download 35 is, what its benefits are, how to download and install it, how to use it, and how to fix some common issues with it.

      -

      Download 35 Explained

      -

      What is Download 35?

      -

      Download 35 is not a single program or file, but a collection of different components that are required for some Windows applications and games to run properly. These components include:

      -

      download 35


      Download File →→→ https://urllie.com/2uNIxb



      -
        -
      • Microsoft .NET Framework 3.5: This is a software framework that provides a set of libraries and tools for developing and running applications that use .NET technologies, such as Windows Forms, WPF, ASP.NET, LINQ, WCF, etc. .NET Framework 3.5 includes .NET Framework 2.0 and 3.0 service packs.
      • -
      • DirectX End-User Runtime: This is a package that installs a number of runtime libraries from the legacy DirectX SDK for some games that use D3DX9, D3DX10, D3DX11, XAudio 2.7, XInput 1.3, XACT, and/or Managed DirectX 1.1. These libraries are not part of the DirectX Runtime installed on your Windows OS.
      • -
      -

      Some applications and games may require these components to function correctly, especially if they were developed using older versions of .NET or DirectX. If you don't have these components installed on your system, you may encounter errors or crashes when trying to run these applications or games.
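Before hunting for installers, it can help to confirm what is already on the machine. Below is a minimal Python sketch that reads the registry flag Microsoft documents for .NET Framework 3.5; the key path is the standard documented one, but treat the snippet as an illustration rather than the article's prescribed method:

```python
import winreg  # standard library; Windows only

def dotnet35_installed() -> bool:
    """Check the 'Install' flag Microsoft documents for .NET Framework 3.5."""
    key_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP\v3.5"
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_path) as key:
            install_flag, _ = winreg.QueryValueEx(key, "Install")
            return install_flag == 1
    except OSError:
        # Key or value missing means the component is not installed.
        return False

print(".NET Framework 3.5 installed:", dotnet35_installed())
```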

      -

      What are the Benefits of Download 35?

      -

      By installing download 35 on your system, you can enjoy the following benefits:

      -


      -
        -
      • Compatibility: You can run applications and games that require .NET Framework 3.5 or DirectX End-User Runtime without any issues.
      • -
      • Performance: You can improve the performance and stability of your applications and games by using the latest versions of .NET Framework and DirectX Runtime.
      • -
      • Security: You can protect your system from potential vulnerabilities by applying the latest updates and patches for .NET Framework and DirectX Runtime.
      • -
      -

      How to Download and Install Download 35

      -

      How to Download Download 35

      -

      To download download 35, you need to visit the official Microsoft websites for .NET Framework 3.5 and DirectX End-User Runtime. You can also use the links below:

      - - - - -
| Component | Download Link |
| --- | --- |
| .NET Framework 3.5 | [text](^1^) |
| DirectX End-User Runtime | [text](^3^) |
      -

      Once you have downloaded the files, you need to save them in a location that you can easily access later.

      -

      How to Install Download 35

      -

      To install download 35, you need to follow these steps:

      -
        -
1. Run the .NET Framework 3.5 installer: Double-click on the file that you downloaded and follow the instructions on the screen. You may need to restart your computer after the installation is complete.
2. Run the DirectX End-User Runtime installer: Double-click on the file that you downloaded and follow the instructions on the screen. You may need to accept the Microsoft Software License Terms and choose a location to extract the files. After that, open the extracted folder and run the DXSETUP.exe file. You may need to restart your computer after the installation is complete.
      -

      Congratulations, you have successfully installed download 35 on your system!
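As an alternative to the graphical installer, on Windows 8 and later .NET Framework 3.5 ships as an optional Windows feature that can be enabled from the command line. Here is a hedged sketch using Python's subprocess and the documented DISM feature name NetFx3; it assumes an elevated (administrator) session with internet access or install media for the feature payload:

```python
import subprocess

# Enable the .NET Framework 3.5 optional feature via DISM.
# Assumptions: Windows 8 or later, an elevated (administrator) prompt,
# and an internet connection or install media for the feature payload.
result = subprocess.run(
    ["dism", "/online", "/enable-feature", "/featurename:NetFx3", "/all"],
    capture_output=True,
    text=True,
)
print(result.stdout if result.returncode == 0 else result.stderr)
```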

      -

      How to Use Download 35

      -

      How to Run Apps with Download 35

      -

      To run applications or games that require download 35, you just need to launch them as usual. If they are compatible with .NET Framework 3.5 or DirectX End-User Runtime, they should run smoothly and without errors. However, some applications or games may require additional settings or configurations to work properly. In that case, you should refer to the documentation or support of the specific application or game for more information.

      -

      How to Update Download 35

      -

      To update download 35, you need to check for updates for .NET Framework 3.5 and DirectX End-User Runtime separately. You can do this by using the following methods:

      -
        -
      • For .NET Framework 3.5: You can use Windows Update to check for and install updates for .NET Framework 3.5. You can also download and install the latest service pack for .NET Framework 3.5 from the Microsoft website.
      • -
      • For DirectX End-User Runtime: You can use Windows Update to check for and install updates for DirectX End-User Runtime. You can also download and install the latest version of DirectX End-User Runtime from the Microsoft website.
      • -
      -

      It is recommended that you keep download 35 updated to ensure optimal performance and security of your applications and games.
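If you are not sure which .NET Framework versions are already current on your system, the standard NDP registry key lists one subkey per installed runtime. A small Python sketch, assuming the documented key layout:

```python
import winreg

# List the version subkeys (v2.0.50727, v3.0, v3.5, v4, ...) under the
# standard .NET setup key, so you can see which runtimes are present.
ndp_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP"
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, ndp_path) as ndp:
    subkey_count = winreg.QueryInfoKey(ndp)[0]  # (subkeys, values, mtime)
    for index in range(subkey_count):
        name = winreg.EnumKey(ndp, index)
        if name.startswith("v"):
            print(name)
```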

      -

      Common Issues and Solutions with Download 35

      -

      How to Fix Download 35 Errors

      -

      Sometimes, you may encounter errors or issues with download 35 that prevent you from running your applications or games properly. Some of the common errors or issues are:

      -
        -
      • .NET Framework Initialization Error: This error occurs when an application requires a version of .NET Framework that is not installed on your system. To fix this error, you need to install the required version of .NET Framework or update your application to use a compatible version of .NET Framework.
      • -
      • D3DX9_XX.DLL Not Found or Missing: This error occurs when an application or game requires a specific version of D3DX9 library that is not installed on your system. To fix this error, you need to install the DirectX End-User Runtime package that contains the missing library.
      • -
      • Application Has Stopped Working: This error occurs when an application or game crashes due to various reasons, such as incompatible hardware, corrupted files, outdated drivers, etc. To fix this error, you need to troubleshoot the cause of the crash and apply the appropriate solution.
      • -
      -

      If you encounter any other errors or issues with download 35, you can search for solutions online or contact the support team of the application or game that is causing the problem.
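For the D3DX9 case in particular, you can verify whether the legacy runtime is present by simply trying to load one of its DLLs. The sketch below probes d3dx9_43.dll, the last D3DX9 build; older games may need a different number (d3dx9_24 through d3dx9_43), so adjust the name to whatever your error message reports:

```python
import ctypes  # standard library; WinDLL is available on Windows only

# Try to load the legacy D3DX9 library that the DirectX End-User
# Runtime installs. d3dx9_43.dll is assumed here; substitute the exact
# DLL name from your game's error message if it differs.
try:
    ctypes.WinDLL("d3dx9_43.dll")
    print("Legacy D3DX9 runtime found.")
except OSError:
    print("D3DX9 runtime missing; install the DirectX End-User Runtime.")
```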

      -

      How to Uninstall Download 35

      -

      If you want to uninstall download 35 from your system, you need to uninstall .NET Framework 3.5 and DirectX End-User Runtime separately. You can do this by using the following methods:

      -
        -
      • For .NET Framework 3.5: You can use Windows Features to turn off .NET Framework 3.5 from your system. To do this, go to Control Panel > Programs > Turn Windows features on or off > Uncheck .NET Framework 3.5 (includes .NET 2.0 and 3.0) > OK.
      • -
      • For DirectX End-User Runtime: You can use Programs and Features to uninstall DirectX End-User Runtime from your system. To do this, go to Control Panel > Programs > Programs and Features > Find DirectX End-User Runtime Web Installer > Right-click > Uninstall.
      • -
      -

      Note that uninstalling download 35 may affect the functionality of some applications or games that depend on it.

      -

Conclusion

      -

      In this article, we have learned what download 35 is, why you need it, how to download and install it, how to use it, and how to fix some common issues with it. Download 35 is a collection of components that are required for some Windows applications and games to run properly. By installing download 35, you can enjoy compatibility, performance, and security benefits. However, you may also encounter some errors or issues with download 35 that can be resolved by following the solutions provided in this article. We hope that this article has helped you understand and use download 35 better.

      -

      FAQs

      -

      What is the difference between .NET Framework and DirectX?

      -

      .NET Framework and DirectX are two different software frameworks that provide different functionalities for applications and games. .NET Framework is a software framework that provides a set of libraries and tools for developing and running applications that use .NET technologies, such as Windows Forms, WPF, ASP.NET, LINQ, WCF, etc. DirectX is a software framework that provides a set of APIs and libraries for developing and running applications and games that use multimedia features, such as graphics, sound, video, input, etc.

      -

      Do I need to install both .NET Framework 3.5 and DirectX End-User Runtime?

      -

      It depends on the requirements of the applications or games that you want to run. Some applications or games may require only one of them, while others may require both of them. You can check the documentation or support of the specific application or game to find out what components it needs.

      -

      Can I install other versions of .NET Framework or DirectX?

      -

      Yes, you can install other versions of .NET Framework or DirectX on your system. However, you should be aware that some applications or games may not be compatible with newer or older versions of .NET Framework or DirectX. Therefore, you should always check the compatibility before installing other versions of .NET Framework or DirectX.

      -

      How can I check if I have download 35 installed on my system?

      -

      You can check if you have download 35 installed on your system by using the following methods:

      -
        -
• For .NET Framework 3.5: You can use Windows Features to see if .NET Framework 3.5 is enabled on your system. To do this, go to Control Panel > Programs > Turn Windows features on or off > Check if .NET Framework 3.5 (includes .NET 2.0 and 3.0) is checked.
      • -
      • For DirectX End-User Runtime: You can use DirectX Diagnostic Tool to see what version of DirectX Runtime is installed on your system. To do this, go to Start > Run > Type dxdiag > OK > Check the DirectX Version on the System tab.
      • -
      -

      Where can I get more information or help about download 35?

      -

      If you need more information or help about download 35, you can visit the following websites:

      -
        -
      • For .NET Framework 3.5: You can visit the Microsoft website for more information about .NET Framework 3.5, such as features, downloads, updates, documentation, support, etc.
      • -
      • For DirectX End-User Runtime: You can visit the Microsoft website for more information about DirectX End-User Runtime, such as features, downloads, updates, documentation, support, etc.
      • -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatmacankara/ASCARIS/code/add_3Dalignment.py b/spaces/fatmacankara/ASCARIS/code/add_3Dalignment.py deleted file mode 100644 index 6f63fd6ba7cdb091b3797dcedd572e73a19b75ad..0000000000000000000000000000000000000000 --- a/spaces/fatmacankara/ASCARIS/code/add_3Dalignment.py +++ /dev/null @@ -1,261 +0,0 @@ -""" -This code file produces alignments between the structure and the sequence for a given protein. - -""" - -import math -import glob -import numpy as np -from Bio import Align -import gzip -from pathlib import Path -from Bio.Align import substitution_matrices -aligner = Align.PairwiseAligner() - -def distance(x1, y1, z1, x2, y2, z2): - d = math.sqrt(math.pow(x2 - x1, 2) + - math.pow(y2 - y1, 2) + - math.pow(z2 - z1, 2) * 1.0) - return d - - -def find_distance(coordMut, coordAnnot): - if coordMut != np.NaN: - try: - dist = distance(float(coordMut[0]), float(coordMut[1]), float(coordMut[2]), float(coordAnnot[0]), - float(coordAnnot[1]), float(coordAnnot[2])) - return "%.2f" % dist - except: - ValueError - dist = 'nan' - return dist - else: - return np.NaN - - -def threeToOne(variant): - if variant == "ALA": - variant = "A" - elif variant == "ARG": - variant = "R" - elif variant == "VAL": - variant = "V" - elif variant == "GLU": - variant = "E" - elif variant == "PRO": - variant = "P" - elif variant == "LEU": - variant = "L" - elif variant == "GLY": - variant = "G" - elif variant == "ASN": - variant = "N" - elif variant == "SER": - variant = "S" - elif variant == "GLN": - variant = "Q" - elif variant == "THR": - variant = "T" - elif variant == "MET": - variant = "M" - elif variant == "LYS": - variant = "K" - elif variant == "ASP": - variant = "D" - elif variant == "ILE": - variant = "I" - elif variant == "PHE": - variant = "F" - elif variant == "TRP": - variant = "W" - elif variant == "TYR": - variant = "Y" - elif variant == "HIS": - variant = "H" - elif variant == "CYS": - variant = "C" - elif variant == 'UNK': - variant = 'X' - elif variant == 'ASX': - variant = 'O' - return (variant) - - -def get_coords(annot, alignments, coords, resnums_for_sasa, mode): - if mode == 1: - for alignment in alignments[0]: - alignment = (str(alignment).strip().split('\n')) - startGap = 0 - if alignment[0].startswith('.'): - for k in alignment[0]: - if k == '.' or k == '-': - startGap += 1 - else: - break - countGap = startGap - countResidue = 0 - for j in alignment[0][startGap:]: - if j == '.' or j == '-': - countGap += 1 - else: - countResidue += 1 - if countResidue == float(annot): - break - countGap_pdb = 0 - countResidue_pdb = 0 - for m in alignment[2][0:countResidue + countGap - 1]: - if m == '.' or m == '-': - countGap_pdb += 1 - posAtom = countResidue + countGap - countGap_pdb - - realpdbStart = 0 - for j in alignment[2]: - if j == '.' or j == '-': - realpdbStart += 1 - else: - break - - if (alignment[2][countResidue + countGap - 1] != '-') and (float(annot) >= float(realpdbStart) + 1): - try: - coordinates = alignments[1] - residue_numbers = alignments[2] - coordWeWant = coordinates[posAtom - 1] - residue_number_we_want = residue_numbers[posAtom - 1] - - except: - IndexError - coordWeWant = 'nan' - else: - coordWeWant = 'nan' - return coordWeWant, posAtom, residue_number_we_want - if mode == 2: - if annot != 'nan': - if int(annot) <= 1400: - alignment = (str(alignments).strip().split('\n')) - startGap = 0 - if alignment[0].startswith('.'): - for k in alignment[0]: - if k == '.' 
or k == '-': - startGap += 1 - else: - break - countGap = startGap - countResidue = 0 - for j in alignment[0][startGap:]: - if j == '.' or j == '-': - countGap += 1 - else: - countResidue += 1 - if countResidue == float(annot): - break - countGap_pdb = 0 - countResidue_pdb = 0 - for m in alignment[2][0:countResidue + countGap - 1]: - if m == '.' or m == '-': - countGap_pdb += 1 - posAtom = countResidue + countGap - countGap_pdb - realpdbStart = 0 - for j in alignment[2]: - if j == '.' or j == '-': - realpdbStart += 1 - else: - break - if len(alignment[2]) > (countResidue + countGap - 1): - if (alignment[2][countResidue + countGap - 1] != '-') and (float(annot) >= float(realpdbStart) + 1): - try: - coordinates = coords - residue_numbers = resnums_for_sasa - coordWeWant = coordinates[posAtom - 1] - residue_number_we_want = residue_numbers[posAtom - 1] - except: - IndexError - coordWeWant = 'nan' - residue_number_we_want = 'nan' - else: - coordWeWant = 'nan' - residue_number_we_want = 'nan' - return coordWeWant, posAtom, residue_number_we_want - else: - coordWeWant = 'nan' - residue_number_we_want = 'nan' - return coordWeWant, posAtom, residue_number_we_want - else: - return np.NaN, np.NaN, np.NaN - else: - return np.NaN, np.NaN, np.NaN - - -def get_alignments_3D(identifier, model_num, pdb_path, pdbSequence, source, chain, pdbID, mode, path_3D_alignment,file_format = 'gzip'): - if mode == 1: - atomSequence = '' - coords = [] - resnums_for_sasa = [] - with open(pdb_path, encoding="utf8") as f: - for line in f.readlines(): - if source != 'MODBASE': - if line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA' and line[21].upper() == chain.upper(): - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - elif line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA' and line[21] == ' ': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - else: - if line[0:7].strip() == 'ATOM' and line[13:15].strip() == 'CA': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - - f = open(Path(path_3D_alignment / f'{identifier}_{pdbID}_{str(chain)}_alignment.txt'),"w") - - aligner.mode = 'local' - aligner.substitution_matrix = substitution_matrices.load("BLOSUM62") - aligner.open_gap_score = -11 - aligner.extend_gap_score = -1 - alignments = aligner.align(pdbSequence, atomSequence) - alignments = (list(alignments)) - for alignment in alignments: - f.write(str(alignment)) - f.write('\n') - f.write('\n') - return alignments, coords, resnums_for_sasa - elif mode==2: - atomSequence = '' - coords = [] - resnums_for_sasa = [] - if file_format == 'txt': - with open(name, encoding="utf8") as f: - for line in f.readlines(): - if line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - elif line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA' and line[21] == ' ': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - elif file_format == 
'gzip': - with gzip.open(pdb_path, mode='rb') as f: - for line in f: - line = line.decode() - if line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - elif line[0:4].strip() == 'ATOM' and line[13:15].strip() == 'CA' and line[21] == ' ': - atomSequence += threeToOne(line[17:20].strip()) - coords.append([line[31:38].strip(), line[39:46].strip(), line[47:54].strip()]) - resnums_for_sasa.append(line[22:26].strip()) - f = open(Path(path_3D_alignment / f'{identifier}_{str(model_num)}_3Dalignment.txt'),"w") - aligner.mode = 'local' - aligner.substitution_matrix = substitution_matrices.load("BLOSUM62") - aligner.open_gap_score = -11 - aligner.extend_gap_score = -1 - alignments = aligner.align(pdbSequence, atomSequence) - alignments = (list(alignments)) - for alignment in alignments: - f.write(str(alignment)) - f.write('\n') - f.write('\n') - return alignments, coords, resnums_for_sasa diff --git a/spaces/fclong/summary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh b/spaces/fclong/summary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh deleted file mode 100644 index 845e93093cc6390db2c332c22e860ff88688a657..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=afqmc-bart-base # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=2 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:2 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id) - - -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions - -MODEL_NAME=fengshen-zen1 - -TASK=afqmc -TEXTA_NAME=sentence1 -TEXTB_NAME=sentence2 -LABEL_NAME=label -ID_NAME=id - - -BATCH_SIZE=8 -VAL_BATCH_SIZE=32 -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/ganruyi/experiments/classification_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! 
-fi - -DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/ -PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/ZEN_pretrain_base_v0.1.0 - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - - -config_json="${ROOT_DIR}/ds_config.json" -# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() -# reduce_bucket_size: hidden_size*hidden_size -# stage3_prefetch_bucket_size: 0.9 * hidden_size * hidden_size -# stage3_param_persistence_threshold: 10 * hidden_size - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": $BATCH_SIZE, - "steps_per_print": 100, - "gradient_clipping": 0.1, - "zero_optimization": { - "stage": ${ZERO_STAGE} - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-7, - "eps": 1e-12, - "weight_decay": 1e-2 - } - }, - "scheduler": { - "type": "WarmupLR", - "params":{ - "warmup_min_lr": 1e-5, - "warmup_max_lr": 1e-4, - "warmup_num_steps": 400, - "warmup_type": "linear" - } - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": false, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json - - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.json \ - --valid_data dev.json \ - --test_data test.json \ - --train_batchsize $BATCH_SIZE \ - --valid_batchsize $VAL_BATCH_SIZE \ - --max_length 64 \ - --texta_name $TEXTA_NAME \ - --textb_name $TEXTB_NAME \ - --label_name $LABEL_NAME \ - --id_name $ID_NAME \ - " - -MODEL_ARGS="\ - --learning_rate 1e-5 \ - --weight_decay 1e-2 \ - --warmup 0.01 \ - --num_labels 2 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 200 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - - -TRAINER_ARGS="\ - --max_epochs 10 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy $STRATEGY \ - --gradient_clip_val 1.0 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 1.0 \ - --default_root_dir $ROOT_DIR \ - " - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ - " - -SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/classification/finetune_classification.py - -# python3 $SCRIPT_PATH $options -source activate base -# srun singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/face_alignment.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/face_alignment.py deleted file mode 100644 index 9f62666dc83a83d4e95446b445b9145dfe11f77c..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/utils/ffhq_dataset/face_alignment.py +++ /dev/null @@ -1,99 +0,0 @@ -import numpy as np -import scipy.ndimage -import os -import PIL.Image - - -def image_align(src_file, dst_file, face_landmarks, resize=True, output_size=1024, transform_size=4096, 
enable_padding=True): - # Align function from FFHQ dataset pre-processing step - # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py - - lm = np.array(face_landmarks) - lm_chin = lm[0 : 17] # left-right - lm_eyebrow_left = lm[17 : 22] # left-right - lm_eyebrow_right = lm[22 : 27] # left-right - lm_nose = lm[27 : 31] # top-down - lm_nostrils = lm[31 : 36] # top-down - lm_eye_left = lm[36 : 42] # left-clockwise - lm_eye_right = lm[42 : 48] # left-clockwise - lm_mouth_outer = lm[48 : 60] # left-clockwise - lm_mouth_inner = lm[60 : 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # Load in-the-wild image. - if not os.path.isfile(src_file): - print('\nCannot find source image. Please run "--wilds" before "--align".') - return - #img = cv2.imread(src_file) - #img = PIL.Image.fromarray(img) - img = PIL.Image.open(src_file) - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - img = np.float32(img) - if img.ndim == 2: - img = np.stack((img,)*3, axis=-1) - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - xmin, xmax = np.amin(quad[:,0]), np.amax(quad[:,0]) - ymin, ymax = np.amin(quad[:,1]), np.amax(quad[:,1]) - quad_size = int(max(xmax-xmin, ymax-ymin)+0.5) - - if not resize: - transform_size = output_size = quad_size - - - # Transform. 
- img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Save aligned image. - os.makedirs(os.path.dirname(dst_file), exist_ok=True) - img.save(dst_file, 'PNG') - return quad_size diff --git a/spaces/fengjianliang/bingo/README.md b/spaces/fengjianliang/bingo/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/fengjianliang/bingo/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
- -# Bingo - -Bingo, a New Bing that lets you breathe easy. - -A close recreation of the main features of the New Bing web UI; usable from mainland China, compatible with most Microsoft Bing AI features, and deployable on your own server. - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -For feedback, please visit https://github.com/weaigc/bingo/issues -
      - - diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Mafia Boss with Mafia City Mod APK for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Mafia Boss with Mafia City Mod APK for Android.md deleted file mode 100644 index c8afad6ebf2f078dd25d28aeca8fdba3ac926903..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Mafia Boss with Mafia City Mod APK for Android.md +++ /dev/null @@ -1,102 +0,0 @@ - -

      Mafia City Mod APK Download for Android

      -

Do you love playing mafia-themed games on your Android device? If yes, then you might have heard of Mafia City, one of the most popular and realistic strategy games in this genre. In this game, you can build your own criminal empire, recruit gangsters, fight with rivals, and rule the underworld. But what if you want to enjoy more features and benefits in this game without spending real money or waiting for long hours? Well, there is a solution for that. You can download and install Mafia City Mod APK, a modified version of the original game that gives you unlimited resources, unlocks all levels, removes ads, and much more. In this article, we will tell you everything you need to know about Mafia City Mod APK, including what it is, how to download and install it, what features it offers, and what its pros and cons are. So, without further ado, let's get started.

      -

      What is Mafia City?

      -

Mafia City is a strategy game developed by YottaGame for Android devices. It was released in 2017 and has since gained millions of fans around the world. The game is set in a fictional city where you can create your own mafia family, recruit gangsters, upgrade your buildings, expand your territory, and compete with other players online. You can also join clans, form alliances, participate in events, and enjoy various modes of gameplay. The game has stunning graphics, realistic sound effects, and an immersive storyline that will keep you hooked for hours.

      -

      mafia city mod apk download for android


      Download File --->>> https://gohhs.com/2uPvDI



      -

      What is Mafia City Mod APK?

      -

Mafia City Mod APK is a modified version of the official Mafia City game that has been altered by some third-party developers. The modded version gives you access to unlimited resources such as gold, cash, gems, energy, etc., that you can use to buy anything you want in the game. It also unlocks all the levels, so you can play any stage you like without any restrictions. Moreover, it unlocks all the VIP premium accessories such as weapons, cars, outfits, etc., that you can equip on your gangsters to make them more powerful. Additionally, it removes all the annoying ads that pop up on your screen while playing the game. And if that's not enough, it also offers some other features such as anti-ban protection, an auto-update function, an easy user interface, etc.

      -

      How to Download and Install Mafia City Mod APK?

      -

If you are interested in downloading and installing Mafia City Mod APK on your Android device, then you need to follow these simple steps:

      -

      mafia city mod apk unlimited gold and money
      -mafia city mod apk latest version 2023
      -mafia city mod apk offline
      -mafia city mod apk unlimited gems and coins
      -mafia city mod apk hack
      -mafia city mod apk free shopping
      -mafia city mod apk revdl
      -mafia city mod apk rexdl
      -mafia city mod apk android 1
      -mafia city mod apk unlimited everything
      -mafia city mod apk no root
      -mafia city mod apk vip unlocked
      -mafia city mod apk unlimited troops
      -mafia city mod apk obb
      -mafia city mod apk unlimited resources
      -mafia city mod apk online
      -mafia city mod apk unlimited cash and gold
      -mafia city mod apk happymod
      -mafia city mod apk an1
      -mafia city mod apk anti ban
      -mafia city mod apk all unlocked
      -mafia city mod apk blackmod
-mafia city mod apk by getmodsapk.com
      -mafia city mod apk cheat
      -mafia city mod apk download latest version
      -mafia city mod apk download apkpure
      -mafia city mod apk download for pc
      -mafia city mod apk download 2022
      -mafia city mod apk download uptodown
      -mafia city mod apk download android 11
      -mafia city mod apk free download for android
      -mafia city mod apk full unlocked
      -mafia city mod apk god mode
      -mafia city mod apk generator
      -mafia city mod apk high damage
      -mafia city mod apk highly compressed
      -mafia city mod apk ios download
      -mafia city mod apk ihackedit
      -mafia city mod apk install
      -mafia city mod apk latest update 2023
      -mafia city mod apk mega.nz
      -mafia city mod apk mediafıre link 2023
      -mafia city mod apk new version 2023 download for android
      -mafia city mod apk no verification
      -mafia city mod apk original
      -mafia city mod apk premium
      -mafia city mod apk pro
      -mafia city mod apk pure
      -mafia city mod apk platinmods

      -
1. First, uninstall the original Mafia City game from your device if you have it installed.
2. Enable the "Unknown Sources" option in your device settings. This allows you to install apps from sources other than the Google Play Store.
3. Download the Mafia City Mod APK file from a reliable source on the internet. You can use this link to download it.
4. After downloading the file, locate it in your device storage and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to complete.
6. Once the installation is done, launch the game and enjoy the modded version. (If you prefer to sideload from a computer, see the sketch below.)
      -
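For readers who sideload from a computer instead, here is a minimal Python sketch of the same flow using the adb command-line tool. It assumes adb is installed and USB debugging is enabled on the device; the APK file name is a placeholder, and the package name in the uninstall step is a guess rather than a confirmed identifier.

```python
import subprocess

APK = "mafia_city_mod.apk"  # placeholder name for the downloaded mod APK

def adb(*args):
    """Run an adb command, raising if it fails."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(result.stderr.strip() or result.stdout.strip())
    return result.stdout

# Step 1: remove the original game; ignore failure if it was never installed.
subprocess.run(["adb", "uninstall", "com.yottagames.mafiawar"])  # package name is a guess

# Steps 4-5: install the mod APK; -r replaces any existing install.
print(adb("install", "-r", APK))
```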

      Note: You may need to allow some permissions to the app for it to run properly. Also, make sure you have enough space on your device before installing the app.

      -

      Features of Mafia City Mod APK

      -

      Mafia City Mod APK offers a lot of features that make the game more fun and exciting. Here are some of the main features of the modded version:

      -

      Unlimited Gold

      -

      Gold is the premium currency in Mafia City that you can use to buy various items and resources in the game. However, gold is very hard to earn and expensive to buy with real money. With Mafia City Mod APK, you don't have to worry about that. You can get unlimited gold for free and spend it as much as you want. You can use gold to upgrade your buildings, recruit more gangsters, buy VIP items, speed up tasks, and more.

      -

      Unlock All Levels

      -

      Mafia City has hundreds of levels that you can play and enjoy. However, some of the levels are locked and require you to reach a certain level or complete certain missions to unlock them. This can be frustrating and time-consuming. With Mafia City Mod APK, you can unlock all the levels in the game and play any stage you like. You can also skip the tutorials and cutscenes if you want.

      -

      Unlock All VIP Premium Accessories

      -

      Mafia City has a lot of VIP premium accessories that you can equip to your gangsters and make them more powerful and stylish. These accessories include weapons, cars, outfits, tattoos, etc. However, these accessories are very expensive and require you to have a VIP membership or spend real money to get them. With Mafia City Mod APK, you can unlock all the VIP premium accessories in the game and use them for free. You can also customize your gangsters according to your preference.

      -

      No Ads

      -

      Mafia City is a free-to-play game, but it also has a lot of ads that pop up on your screen while playing the game. These ads can be annoying and distracting, and sometimes they can even affect your gameplay. With Mafia City Mod APK, you can remove all the ads from the game and enjoy a smooth and uninterrupted gaming experience.

      -

      Other Features

      -

      Besides the features mentioned above, Mafia City Mod APK also offers some other features such as:

      -
• Anti-ban protection: prevents your account from getting banned by the game developers for using the modded version.
• Auto-update function: keeps the app updated with the latest version and features of the game.
• Easy user interface: makes the app easy to use and navigate for anyone.
• Compatible with all Android devices: the app works on any Android device, regardless of its model or version.
      -

      Pros and Cons of Mafia City Mod APK

      -

      Mafia City Mod APK has its own pros and cons that you should consider before downloading and installing it. Here is a table comparing the advantages and disadvantages of the modded version:

| Pros | Cons |
| --- | --- |
| Unlimited resources | May cause lag or crash |
| Unlock all levels | May not work on some devices |
| Unlock all VIP premium accessories | May not be safe or secure |
| No ads | May violate the game's terms of service |
| Other features | May ruin the original gameplay |

      FAQs about Mafia City Mod APK

      -

      Here are some of the frequently asked questions and answers about Mafia City Mod APK:

      -
1. Is Mafia City Mod APK free?
   Yes, Mafia City Mod APK is free to download and use. You don't have to pay anything to enjoy its features.
2. Is Mafia City Mod APK safe?
   Mafia City Mod APK is not an official app from YottaGame, so it may not be safe or secure to use. It may contain viruses or malware that can harm your device or steal your data. Therefore, you should download it from a trusted source and scan it with an antivirus before installing it (a checksum check, sketched below, is one extra precaution).
3. Is Mafia City Mod APK legal?
   No. It violates the game's terms of service and infringes YottaGame's intellectual property rights, so using it may result in legal action from YottaGame or Google Play Store. Use it at your own risk and responsibility.
4. Can I play Mafia City Mod APK online?
   Yes, you can play Mafia City Mod APK online with other players. However, you may face issues or errors while connecting to the game server or joining a clan, and you may not be able to access some of the online features or events of the game.
5. Can I update Mafia City Mod APK?
   Yes, you can update Mafia City Mod APK whenever a new version is available. However, you may lose some of the modded features or data after updating, so back up your progress before updating the app.
      -
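As a small supplement to the safety answer above, here is a minimal Python sketch for checking a downloaded APK against a published SHA-256 checksum, assuming the site you download from actually publishes one (many do not). The file name and expected digest are placeholders.

```python
import hashlib

APK_PATH = "mafia_city_mod.apk"             # placeholder: your downloaded file
EXPECTED = "replace-with-published-sha256"  # placeholder: digest from the download page

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in chunks so large APKs don't need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(APK_PATH)
print("match" if actual == EXPECTED.lower() else f"MISMATCH: got {actual}")
```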

      Conclusion

      -

      Mafia City Mod APK is a great option for those who want to enjoy more features and benefits in Mafia City game without spending real money or waiting for long hours. It gives you unlimited resources, unlocks all levels, unlocks all VIP premium accessories, removes ads, and offers other features that make the game more fun and exciting. However, it also has some drawbacks that you should be aware of before downloading and installing it. It may cause lag or crash, may not work on some devices, may not be safe or secure, may violate the game's terms of service, and may ruin the original gameplay. Therefore, you should use it at your own risk and responsibility.

      -

      If you are interested in downloading and installing Mafia City Mod APK on your android device, then you can follow the steps mentioned above in this article. You can also check out the features, pros and cons, and FAQs of the modded version in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bus Simulator Indonesia OBB Mod APK The Most Realistic and Fun Bus Simulation Game for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bus Simulator Indonesia OBB Mod APK The Most Realistic and Fun Bus Simulation Game for Android.md deleted file mode 100644 index 6308fddb4b617cbfae86c45ff407ca42afd12837..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bus Simulator Indonesia OBB Mod APK The Most Realistic and Fun Bus Simulation Game for Android.md +++ /dev/null @@ -1,110 +0,0 @@ - -

      Download OBB Bus Simulator Indonesia Mod APK: A Guide for Android Users

      -

      If you are a fan of simulation games, especially bus driving games, you might want to try OBB Bus Simulator Indonesia Mod APK. This is a modified version of the popular game Bus Simulator Indonesia, which lets you experience the thrill of driving a bus in the beautiful and diverse country of Indonesia. In this article, we will show you how to download and install OBB Bus Simulator Indonesia Mod APK on your Android device, as well as some of the features, tips, and tricks that you can enjoy in this game.

      -

      How to download OBB Bus Simulator Indonesia Mod APK from APKCombo

      -

      One of the easiest ways to download OBB Bus Simulator Indonesia Mod APK is from APKCombo, a website that offers a variety of Android apps and games for free. Here are the steps to follow:

      -

      download obb bus simulator indonesia mod apk


      Download 🌟 https://gohhs.com/2uPq7f



      -
1. Go to APKCombo and search for "Bus Simulator Indonesia" in the search bar.
2. Select the app named "Bus Simulator Indonesia" by the developer "Maleo", then click the green "Download APK" button.
3. You will be redirected to a page where you can choose the version of the app to download. The latest version is 3.5, updated on September 7, 2023. Click the blue "Download (XAPK)" button.
4. A pop-up window will ask you to confirm your download. Click "OK", and the file will start downloading to your device.
5. Once the download is complete, extract the XAPK file using a file manager app that can handle ZIP files, such as ES File Explorer or ZArchiver. Locate the XAPK file in your downloads folder and extract it; you will get two files, an APK file and an OBB file. (A scripted version of this step is sketched below.)
      -
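An XAPK is just a ZIP archive, so the extraction in step 5 can also be done on a computer with Python's standard library. A minimal sketch under that assumption; the XAPK file name is a placeholder.

```python
import zipfile
from pathlib import Path

XAPK = Path("bus_simulator_indonesia.xapk")  # placeholder name for the download

# Unpack the archive into a folder named after the file.
with zipfile.ZipFile(XAPK) as archive:
    archive.extractall(XAPK.stem)

# List what came out; for this game we expect one .apk and one .obb file.
for path in Path(XAPK.stem).rglob("*"):
    if path.suffix in (".apk", ".obb"):
        print(path)
```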

      How to install OBB Bus Simulator Indonesia Mod APK on your Android device

      -

      After extracting the XAPK file, you will need to install the APK file and copy the OBB file to your device's internal storage. Here are the steps to follow:

      -
1. Before installing the APK file, enable "Unknown sources" on your device so that you can install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.
2. Locate the APK file in your downloads folder or wherever you extracted it, tap on it, and follow the instructions on the screen to install it.
3. After installing the APK file, do not open it yet. You first need to copy the OBB file to a specific folder on your device's internal storage: "/Android/obb/com.maleo.bussimulatorid". If this folder does not exist, create it manually using a file manager app.
4. Locate the OBB file (it has a name like "main.35.com.maleo.bussimulatorid.obb") and copy or move it into the folder from the previous step.
5. After copying the OBB file, you can open the app and enjoy playing OBB Bus Simulator Indonesia Mod APK on your Android device. (See the adb-based sketch below for doing steps 3 and 4 from a computer.)
      -
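If the device is connected to a computer with USB debugging enabled, steps 3 and 4 can be done with adb instead of an on-device file manager. A minimal sketch, assuming adb is on your PATH; it uses the package and OBB names quoted in the steps above, while the APK file name is a placeholder for whatever came out of the XAPK.

```python
import subprocess

PACKAGE = "com.maleo.bussimulatorid"
OBB = "main.35.com.maleo.bussimulatorid.obb"
DEST = f"/sdcard/Android/obb/{PACKAGE}/"  # the "/Android/obb/..." folder, as adb sees it

def adb(*args):
    subprocess.run(["adb", *args], check=True)

adb("install", "-r", "BusSimulatorIndonesia.apk")  # placeholder name for the extracted APK
adb("shell", "mkdir", "-p", DEST)                  # create the obb folder if it is missing
adb("push", OBB, DEST + OBB)                       # copy the expansion file into place
```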

      Features of OBB Bus Simulator Indonesia Mod APK

      -

      OBB Bus Simulator Indonesia Mod APK is a fun and realistic game that lets you drive various types of buses in Indonesia. You can customize your buses with different skins, accessories, and stickers, and choose from different maps and routes to explore. Here are some of the features that you can enjoy in this game:

      -
• Realistic graphics and sound effects: The game has high-quality graphics that show the details of the buses, the roads, the buildings, and the scenery. You can also hear the sounds of the engine, the horn, the traffic, and the passengers. The game also supports Indonesian language and voiceovers.
• Customizable buses and skins: You can choose from a variety of buses, such as mini buses, double-decker buses, school buses, and more. You can also change the color, the design, and the logo of your buses, and add accessories like lights, mirrors, wipers, and spoilers. You can even create your own skins using the built-in editor.
• Various maps and routes to explore: You can drive your buses on different maps that represent different regions of Indonesia, such as Java, Sumatra, Bali, and more. You can also choose from different routes that have different levels of difficulty, traffic, and scenery. You can see the landmarks, the monuments, and the culture of each region.
• Online and offline modes: You can play OBB Bus Simulator Indonesia Mod APK online or offline. In online mode, you can connect with other players from around the world and join clans. You can chat with them, compete with them, and cooperate with them. In offline mode, you can play without an internet connection and enjoy the game at your own pace.
      -

      Tips and Tricks for Playing OBB Bus Simulator Indonesia Mod APK

      -

      OBB Bus Simulator Indonesia Mod APK is a challenging and rewarding game that requires skill and strategy. Here are some tips and tricks that can help you play better and have more fun:

      -
• How to drive safely and efficiently: Follow the traffic rules and regulations. Obey the speed limit, the traffic lights, the signs, and the signals, and avoid collisions with other vehicles, pedestrians, animals, and obstacles. Use your indicators, headlights, brake lights, and horn when necessary, and check your mirrors, dashboard, and GPS regularly.
• How to earn more money and unlock new buses: Complete missions and challenges. Pick up and drop off passengers at their destinations on time and without accidents, and collect coins and bonuses along the way. You can use the money to buy new buses or upgrade your existing ones, and you can also unlock new buses by reaching certain levels or achievements.
• How to use the map and GPS system: Tap the map icon in the top right corner of the screen. You will see a map showing your location, your destination, your route, and your progress, along with icons for gas stations, repair shops, rest areas, toll booths, checkpoints, and landmarks. Zoom in or out by pinching or spreading your fingers, and tap any icon to see more information about it.
• How to interact with other players and join clans: Play in online mode. Tap the chat icon in the top left corner of the screen to open the chat window, where you can send messages, emojis, and stickers to all players or to a specific player. Tap the clan icon in the bottom right corner to open the clan menu, where you can create your own clan or join an existing one, invite players or accept invitations, and see the clan ranking, the clan chat, and the clan missions.
      -

      Alternatives to OBB Bus Simulator Indonesia Mod APK

      -

      If you are looking for other games that are similar to OBB Bus Simulator Indonesia Mod APK, you might want to check out these alternatives:

      -
• Chittagong Road Traffic OBB for Bus Simulator Indonesia: This is a mod that adds a new map and new buses to Bus Simulator Indonesia. The map is based on the city of Chittagong in Bangladesh, which is known for its heavy traffic and chaotic roads. You can drive different types of buses, such as CNG buses, Volvo buses, and Scania buses. You can also enjoy the realistic scenery, the landmarks, and the culture of Chittagong.
• Public Transport Simulator - C: This is a game that lets you drive various types of public transport vehicles, such as buses, taxis, trams, and trains. You can choose from different modes, such as career mode, free ride mode, and multiplayer mode. You can also customize your vehicles with different colors, decals, and accessories, and explore different cities and environments, such as urban areas, rural areas, mountains, and deserts.
• SimCity BuildIt: This is a game that lets you create and manage your own city. You can build different types of buildings, such as residential, commercial, industrial, and public buildings, and provide services and amenities to your citizens, such as power, water, transportation, education, health, and entertainment. You will also face various challenges and disasters, such as traffic jams, pollution, fires, earthquakes, and more.
      -
      Conclusion and FAQs
      -

      OBB Bus Simulator Indonesia Mod APK is a great game for anyone who loves simulation games and bus driving games. It offers a realistic and immersive experience of driving a bus in Indonesia. You can download and install OBB Bus Simulator Indonesia Mod APK from APKCombo by following the steps in this article. You can also enjoy the features, tips, and tricks that we have shared with you. If you have any questions or feedback about OBB Bus Simulator Indonesia Mod APK, you can check out these FAQs or leave a comment below.

      -

      download obb file for bus simulator indonesia mod apk
      -download bus simulator indonesia mod apk unlimited money obb
      -download bus simulator indonesia mod apk + obb data
      -download bus simulator indonesia mod apk terbaru obb
      -download bus simulator indonesia mod apk versi lama obb
      -download bus simulator indonesia mod apk full unlocked obb
      -download bus simulator indonesia mod apk bussid v3.7.1 obb
      -download bus simulator indonesia mod apk livery bussid obb
      -download bus simulator indonesia mod apk traffic hd obb
      -download bus simulator indonesia mod apk skin keren obb
      -download bus simulator indonesia mod apk sound jet darat obb
      -download bus simulator indonesia mod apk grafik hd obb
      -download bus simulator indonesia mod apk map sumatra obb
      -download bus simulator indonesia mod apk map jawa obb
      -download bus simulator indonesia mod apk map bali obb
      -download bus simulator indonesia mod apk map lombok obb
      -download bus simulator indonesia mod apk map kalimantan obb
      -download bus simulator indonesia mod apk map sulawesi obb
      -download bus simulator indonesia mod apk map papua obb
      -download bus simulator indonesia mod apk map malaysia obb
      -download bus simulator indonesia mod apk map singapore obb
      -download bus simulator indonesia mod apk map thailand obb
      -download bus simulator indonesia mod apk map vietnam obb
      -download bus simulator indonesia mod apk map cambodia obb
      -download bus simulator indonesia mod apk map laos obb
      -download bus simulator indonesia mod apk map myanmar obb
      -download bus simulator indonesia mod apk map philippines obb
      -download bus simulator indonesia mod apk map brunei obb
      -download bus simulator indonesia mod apk all buses unlocked obb
      -download bus simulator indonesia mod apk all cars unlocked obb
      -download bus simulator indonesia mod apk all trucks unlocked obb
      -download bus simulator indonesia mod apk all motorcycles unlocked obb
      -download bus simulator indonesia mod apk all vehicles unlocked obb
      -download bus simulator indonesia mod apk no ads obb
      -download bus simulator indonesia mod apk no root obb
      -download bus simulator indonesia mod apk offline mode obb
      -download bus simulator indonesia mod apk online mode obb
      -download bus simulator indonesia mod apk multiplayer mode obb
      -download bus simulator indonesia mod apk free shopping obb
      -download bus simulator indonesia mod apk unlimited fuel obb
      -download bus simulator indonesia mod apk unlimited coins obb
      -download bus simulator indonesia mod apk unlimited gems obb
      -download bus simulator indonesia mod apk unlimited tickets obb
      -download bus simulator indonesia mod apk unlimited xp obb
      -download bus simulator indonesia mod apk unlimited level up obb
      -download bus simulator indonesia mod apk cheat menu obb
      -download bus simulator indonesia mod apk anti ban obb

      -
      FAQs
      -
1. What are the requirements for playing OBB Bus Simulator Indonesia Mod APK?
   You need an Android device with at least 2 GB of RAM and 1 GB of free storage space. You also need an internet connection for online mode.
2. Is OBB Bus Simulator Indonesia Mod APK safe to download and install?
   Yes, OBB Bus Simulator Indonesia Mod APK is safe to download and install from APKCombo, a trusted website that offers verified and secure Android apps and games. However, you should always be careful when downloading apps from unknown sources and scan them with an antivirus app before installing them.
3. How can I update OBB Bus Simulator Indonesia Mod APK?
   Download the latest version of the XAPK file from APKCombo and follow the same steps as above to install it. You don't need to uninstall the previous version of the app.
4. How can I get more coins and bonuses in OBB Bus Simulator Indonesia Mod APK?
   You can do the following things:
   • Drive carefully and avoid accidents. You will get a bonus for completing a trip without any damage or violation.
   • Collect the coins and the gift boxes that appear on the road. They will give you extra money or items.
   • Watch ads or complete offers to get free coins, by tapping on the plus icon next to your coin balance.
   • Use the mod menu to activate cheats, by tapping on the M icon on the top left corner of the screen. You can enable features such as unlimited money, unlimited fuel, no ads, and more.
5. How can I contact the developer of OBB Bus Simulator Indonesia Mod APK?
   You can visit their official website, their Facebook page, or their Instagram account. You can also send them an email at support@maleo.id or leave a review on Google Play Store.
      -

      I hope you enjoyed this article and learned something new about OBB Bus Simulator Indonesia Mod APK. If you did, please share it with your friends and family who might be interested in this game. Thank you for reading and happy gaming!

      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Soul Knight MOD APK 5.2.4 and Explore the Dungeon with Friends.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Soul Knight MOD APK 5.2.4 and Explore the Dungeon with Friends.md deleted file mode 100644 index 9e938d9ae2bf525f91c35a2762242450c51a00e2..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Soul Knight MOD APK 5.2.4 and Explore the Dungeon with Friends.md +++ /dev/null @@ -1,93 +0,0 @@ - -

      Download Soul Knight Latest Version Mod APK

      -

If you are looking for a shooter game with extremely easy and intuitive controls and super smooth, enjoyable gameplay, mixed with rogue-like elements, then you should try Soul Knight. Soul Knight is a pixel roguelike shoot'em up game that combines action and survival. You have to shoot some alien minions, retrieve the magical stone that maintains the balance of the world, and explore randomly generated dungeons full of treasures and surprises. In this article, we will show you how to download and install the latest version of the Soul Knight mod apk, which gives you unlimited gems, coins, characters, skins, and more. Read on to find out more!

      -

      Features of Soul Knight

      -

      Soul Knight is a game that has many features that make it fun and addictive. Here are some of them:

      -

      download soul knight latest version mod apk


      Download ✫✫✫ https://gohhs.com/2uPrZT



      -

      Unique heroes and weapons

      -

Soul Knight has more than 20 unique heroes, each with their own skills, stats, and playstyle. You can choose from a rogue, an elf archer, a magician, a knight, a robot, and more. You can also customize your hero with different skins and outfits. Moreover, Soul Knight has more than 400 weapons, including guns, swords, shovels, lasers, rockets, and grenades. You can find different weapons in the dungeons or buy them from the shop, and you can upgrade your weapons with gems or coins.

      -

      Randomly generated dungeons

      -

      Soul Knight has randomly generated dungeons that offer endless replay value. You will never get bored of exploring different environments, such as dark forests populated by goblins, medieval chateaus infested with zombies, futuristic labs filled with robots, and more. You will also encounter different NPCs, such as merchants, blacksmiths, gardeners, nurses, wizards, and more. They can help you with various services or quests. You will also find chests, crates, barrels, pots, statues, plants, and other objects that may contain items or secrets.

      -

      Auto-aim mechanism and controller support

      -

      Soul Knight has an auto-aim mechanism that makes it super easy and intuitive to control your hero. You just need to tap on the screen to move your hero and dodge enemy attacks. Your hero will automatically shoot at the nearest enemy in range. You can also swipe on the screen to switch weapons or use skills. Additionally, Soul Knight supports controller input for both online and offline modes. You can connect your Bluetooth controller or use an emulator to play Soul Knight on your PC.

      -

      Multiplayer mode and game modes

      -

      Soul Knight has a multiplayer mode that allows you to team up with friends around the world for an online co-op adventure. You can join or create a room with up to three other players and share items and buffs. You can also chat with your teammates using emojis or voice messages. Alternatively, you can get together with your friends for an offline multiplayer LAN game. You just need to connect your devices to the same WiFi network and start the game. Furthermore, Soul Knight has different game modes that add more variety and challenge to the gameplay. You can play the normal mode, the boss rush mode, the daily challenge mode, the trial mode, or the origin mode. Each mode has different rules, objectives, and rewards.

      -

      Benefits of Soul Knight Mod APK

      -

      While Soul Knight is a free-to-play game, it also has some in-app purchases that can enhance your gaming experience. For example, you can buy gems and coins to unlock more characters, skins, weapons, pets, buffs, and other items. You can also remove ads and support the developers by buying the premium version of the game. However, if you don't want to spend real money on the game, you can download and install the Soul Knight mod apk, which will give you the following benefits:

      -

      Unlimited gems and coins

      -

      Gems and coins are the main currencies in Soul Knight. You can use them to buy weapons, items, buffs, pets, skins, and other things from the shop or the vending machines. You can also use them to upgrade your weapons or revive your hero. However, gems and coins are not easy to come by in the game. You have to complete dungeons, watch ads, or complete quests to earn them. With the Soul Knight mod apk, you will get unlimited gems and coins in your account. You can use them as much as you want without worrying about running out.

      -

      Unlocked all characters and skins

      -

      Soul Knight has more than 20 characters that you can play as. Each character has a unique skill and a different stat distribution. Some characters are more suitable for certain playstyles or weapons than others. However, not all characters are available from the start. You have to unlock them with gems or coins or by completing certain achievements. Moreover, each character has several skins that you can use to customize their appearance. Some skins are free, while others require gems or coins to unlock. With the Soul Knight mod apk, you will get all the characters and skins unlocked from the start. You can choose any character or skin you want without spending any gems or coins.

      -

      download soul knight mod apk unlimited gems
      -download soul knight mod apk latest version 2023
      -download soul knight mod apk android 1
      -download soul knight mod apk revdl
      -download soul knight mod apk an1
      -download soul knight mod apk happymod
      -download soul knight mod apk all characters unlocked
      -download soul knight mod apk no root
      -download soul knight mod apk free shopping
      -download soul knight mod apk for ios
      -download soul knight mod apk with obb
      -download soul knight mod apk god mode
      -download soul knight mod apk rexdl
      -download soul knight mod apk unlimited money and gems
      -download soul knight mod apk unlimited energy
      -download soul knight mod apk latest version 5.2.4
      -download soul knight mod apk new update
      -download soul knight mod apk offline
      -download soul knight mod apk online multiplayer
      -download soul knight mod apk unlimited everything
      -download soul knight mod apk premium unlocked
      -download soul knight mod apk unlimited health and ammo
      -download soul knight mod apk 5.2.3
      -download soul knight mod apk 5.2.2
      -download soul knight mod apk 5.2.1
      -download soul knight mod apk 5.2.0
      -download soul knight mod apk 5.1.9
      -download soul knight mod apk 5.1.8
      -download soul knight mod apk 5.1.7
      -download soul knight mod apk 5.1.6
      -download soul knight mod apk 5.1.5
      -download soul knight mod apk 5.1.4
      -download soul knight mod apk 5.1.3
      -download soul knight mod apk 5.1.2
      -download soul knight mod apk 5.1.1
      -download soul knight mod apk 5.1.0
      -download soul knight mod apk 5.0.9
      -download soul knight mod apk 5.0.8
      -download soul knight mod apk 5.0.7
      -download soul knight mod apk 5.0.6
      -download soul knight mod apk 5.0.5
      -download soul knight mod apk 5.0.4
      -download soul knight mod apk 5.0.3
      -download soul knight mod apk 5.0.2
      -download soul knight mod apk 5.0.1
      -download soul knight mod apk 5.0.0

      -

      Free shopping and no ads

      -

      Soul Knight is a free-to-play game that relies on ads and in-app purchases to generate revenue. Therefore, you will encounter ads when you play the game. Some ads are optional and will reward you with gems or coins if you watch them. However, some ads are mandatory and will interrupt your gameplay. Additionally, some items or features in the game require real money to purchase. For example, you have to pay $0.99 to remove ads or $4.99 to get the premium version of the game. With the Soul Knight mod apk, you will get free shopping and no ads in the game. You can buy anything you want without spending real money or watching ads.

      -

      How to Download and Install Soul Knight Mod APK

      -

      If you are interested in downloading and installing the Soul Knight mod apk on your Android device, you can follow these simple steps:

      -

      Step 1: Enable unknown sources

      -

      Before you can install any mod apk file on your device, you have to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings > security > unknown sources and toggle it on.

      -

      Step 2: Download the mod apk file

      -

      Next, you have to download the Soul Knight mod apk file from a reliable source. You can use this link to download the latest version of the mod apk file (v3.2.6). The file size is about 136 MB and it is compatible with Android 4.4 and up.
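Since an interrupted download is a common cause of "app not installed" errors, a quick size check against the roughly 136 MB figure above can catch a truncated file before you sideload it. A minimal Python sketch; the file name is a placeholder, and the threshold is a rough heuristic rather than an exact expected size.

```python
import os

APK = "soul_knight_mod_v3.2.6.apk"  # placeholder name for the downloaded file
EXPECTED_MB = 136                    # approximate size quoted above

size_mb = os.path.getsize(APK) / (1024 * 1024)
print(f"{APK}: {size_mb:.1f} MB")
if size_mb < EXPECTED_MB * 0.95:
    print("File looks truncated; re-download it before installing.")
```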

      -

      Step 3: Install the mod apk file

      -

      After downloading the mod apk file, locate it in your device's file manager and tap on it to start the installation process. Follow the instructions on the screen and wait for a few seconds until the installation is complete.

      -

      Step 4: Enjoy the game

      -

      Once the installation is done, you can launch the game from your app drawer or home screen and enjoy playing Soul Knight with unlimited gems, coins, characters, skins, and more. You can also check out the different game modes and features that Soul Knight has to offer.

      -

      Conclusion

      -

      Soul Knight is a pixel roguelike shoot'em up game that combines action and survival. You have to shoot some alien minions, retrieve the magical stone that maintains the balance of the world, and explore randomly generated dungeons full of treasures and surprises. Soul Knight has many features that make it fun and addictive, such as unique heroes and weapons, randomly generated dungeons, auto-aim mechanism and controller support, multiplayer mode and game modes, and more. However, if you want to enjoy the game without spending real money or watching ads, you can download and install the Soul Knight mod apk, which will give you unlimited gems, coins, characters, skins, and more. You can also unlock all the items and features in the game with the mod apk. To download and install the Soul Knight mod apk, you just need to follow four simple steps: enable unknown sources, download the mod apk file, install the mod apk file, and enjoy the game. So what are you waiting for? Download Soul Knight mod apk now and have fun!

      -

      FAQs

      -

      Is Soul Knight mod apk safe to use?

      -

      Yes, Soul Knight mod apk is safe to use as long as you download it from a reliable source. The mod apk file does not contain any viruses or malware that can harm your device or compromise your privacy. However, you should always be careful when downloading any mod apk file from the internet and scan it with an antivirus before installing it.

      -

      Do I need to root my device to use Soul Knight mod apk?

      -

      No, you do not need to root your device to use Soul Knight mod apk. The mod apk file works on both rooted and non-rooted devices without any issues. You just need to enable unknown sources in your settings and install the mod apk file as usual.

      -

      What is the latest version of Soul Knight mod apk?

      -

      The latest version of Soul Knight mod apk is v3.2.6, which was released on June 18, 2023. The latest version of the mod apk file has some new features and improvements, such as new characters, weapons, pets, skins, dungeons, bosses, enemies, items, buffs, achievements, and more. You can download the latest version of Soul Knight mod apk from this link.

      -

      How can I play Soul Knight online with friends?

      -

      You can play Soul Knight online with friends by using the multiplayer mode in the game. You can join or create a room with up to three other players and share items and buffs. You can also chat with your teammates using emojis or voice messages. To play Soul Knight online with friends, you need to have a stable internet connection and a valid account in the game.

      -

      Where can I download Soul Knight mod apk?

      -

      You can download Soul Knight mod apk from this link. This is a reliable source that provides the latest version of the mod apk file with unlimited gems, coins, characters, skins, and more. You can also find other information about the game and the mod apk file on this website.

      -
      -
      \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/qs/test/utils.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/qs/test/utils.js deleted file mode 100644 index aa84dfdc62beb79b5b980fca5d6ba40c19caa123..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/qs/test/utils.js +++ /dev/null @@ -1,136 +0,0 @@ -'use strict'; - -var test = require('tape'); -var inspect = require('object-inspect'); -var SaferBuffer = require('safer-buffer').Buffer; -var forEach = require('for-each'); -var utils = require('../lib/utils'); - -test('merge()', function (t) { - t.deepEqual(utils.merge(null, true), [null, true], 'merges true into null'); - - t.deepEqual(utils.merge(null, [42]), [null, 42], 'merges null into an array'); - - t.deepEqual(utils.merge({ a: 'b' }, { a: 'c' }), { a: ['b', 'c'] }, 'merges two objects with the same key'); - - var oneMerged = utils.merge({ foo: 'bar' }, { foo: { first: '123' } }); - t.deepEqual(oneMerged, { foo: ['bar', { first: '123' }] }, 'merges a standalone and an object into an array'); - - var twoMerged = utils.merge({ foo: ['bar', { first: '123' }] }, { foo: { second: '456' } }); - t.deepEqual(twoMerged, { foo: { 0: 'bar', 1: { first: '123' }, second: '456' } }, 'merges a standalone and two objects into an array'); - - var sandwiched = utils.merge({ foo: ['bar', { first: '123', second: '456' }] }, { foo: 'baz' }); - t.deepEqual(sandwiched, { foo: ['bar', { first: '123', second: '456' }, 'baz'] }, 'merges an object sandwiched by two standalones into an array'); - - var nestedArrays = utils.merge({ foo: ['baz'] }, { foo: ['bar', 'xyzzy'] }); - t.deepEqual(nestedArrays, { foo: ['baz', 'bar', 'xyzzy'] }); - - var noOptionsNonObjectSource = utils.merge({ foo: 'baz' }, 'bar'); - t.deepEqual(noOptionsNonObjectSource, { foo: 'baz', bar: true }); - - t.test( - 'avoids invoking array setters unnecessarily', - { skip: typeof Object.defineProperty !== 'function' }, - function (st) { - var setCount = 0; - var getCount = 0; - var observed = []; - Object.defineProperty(observed, 0, { - get: function () { - getCount += 1; - return { bar: 'baz' }; - }, - set: function () { setCount += 1; } - }); - utils.merge(observed, [null]); - st.equal(setCount, 0); - st.equal(getCount, 1); - observed[0] = observed[0]; // eslint-disable-line no-self-assign - st.equal(setCount, 1); - st.equal(getCount, 2); - st.end(); - } - ); - - t.end(); -}); - -test('assign()', function (t) { - var target = { a: 1, b: 2 }; - var source = { b: 3, c: 4 }; - var result = utils.assign(target, source); - - t.equal(result, target, 'returns the target'); - t.deepEqual(target, { a: 1, b: 3, c: 4 }, 'target and source are merged'); - t.deepEqual(source, { b: 3, c: 4 }, 'source is untouched'); - - t.end(); -}); - -test('combine()', function (t) { - t.test('both arrays', function (st) { - var a = [1]; - var b = [2]; - var combined = utils.combine(a, b); - - st.deepEqual(a, [1], 'a is not mutated'); - st.deepEqual(b, [2], 'b is not mutated'); - st.notEqual(a, combined, 'a !== combined'); - st.notEqual(b, combined, 'b !== combined'); - st.deepEqual(combined, [1, 2], 'combined is a + b'); - - st.end(); - }); - - t.test('one array, one non-array', function (st) { - var aN = 1; - var a = [aN]; - var bN = 2; - var b = [bN]; - - var combinedAnB = utils.combine(aN, b); - st.deepEqual(b, [bN], 'b is not mutated'); - st.notEqual(aN, combinedAnB, 'aN + b !== aN'); - st.notEqual(a, combinedAnB, 'aN + b !== a'); - 
st.notEqual(bN, combinedAnB, 'aN + b !== bN'); - st.notEqual(b, combinedAnB, 'aN + b !== b'); - st.deepEqual([1, 2], combinedAnB, 'first argument is array-wrapped when not an array'); - - var combinedABn = utils.combine(a, bN); - st.deepEqual(a, [aN], 'a is not mutated'); - st.notEqual(aN, combinedABn, 'a + bN !== aN'); - st.notEqual(a, combinedABn, 'a + bN !== a'); - st.notEqual(bN, combinedABn, 'a + bN !== bN'); - st.notEqual(b, combinedABn, 'a + bN !== b'); - st.deepEqual([1, 2], combinedABn, 'second argument is array-wrapped when not an array'); - - st.end(); - }); - - t.test('neither is an array', function (st) { - var combined = utils.combine(1, 2); - st.notEqual(1, combined, '1 + 2 !== 1'); - st.notEqual(2, combined, '1 + 2 !== 2'); - st.deepEqual([1, 2], combined, 'both arguments are array-wrapped when not an array'); - - st.end(); - }); - - t.end(); -}); - -test('isBuffer()', function (t) { - forEach([null, undefined, true, false, '', 'abc', 42, 0, NaN, {}, [], function () {}, /a/g], function (x) { - t.equal(utils.isBuffer(x), false, inspect(x) + ' is not a buffer'); - }); - - var fakeBuffer = { constructor: Buffer }; - t.equal(utils.isBuffer(fakeBuffer), false, 'fake buffer is not a buffer'); - - var saferBuffer = SaferBuffer.from('abc'); - t.equal(utils.isBuffer(saferBuffer), true, 'SaferBuffer instance is a buffer'); - - var buffer = Buffer.from && Buffer.alloc ? Buffer.from('abc') : new Buffer('abc'); - t.equal(utils.isBuffer(buffer), true, 'real Buffer instance is a buffer'); - t.end(); -}); diff --git a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/components/main_buttons.js b/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/components/main_buttons.js deleted file mode 100644 index 7520de276f37497351d850659d270187c6a70c8c..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/components/main_buttons.js +++ /dev/null @@ -1,78 +0,0 @@ -import Component from '../lib/component.js'; -import store from '../store/index.js'; - -/** - * @classdesc UI component for the main buttons. - */ -export default class MainButtons extends Component { - - /** - * @constructor - */ - constructor() { - super({ - store, - element: document.querySelector('#mainButtons'), - eventName: 'mainButtonsChange' - }); - } - - /** - * Renders the run, reset and save buttons. 
- */ - render() { - const status = store.state.simulationState.status; - - let runButton = this.element.querySelector("#runButton"); - let resetButton = this.element.querySelector("#resetButton"); - let saveEnvButton = this.element.querySelector('#saveEnvButton'); - - let dict = window.lang_dict[store.state.language]['mainButtons']; - - // Disables buttons while drawing - if(store.state.drawingModeState.drawing){ - runButton.className = "btn btn-success disabled"; - runButton.title = dict['runBtnTooltip']; - resetButton.className = "btn btn-danger disabled"; - saveEnvButton.className = "btn btn-primary mx-3 disabled"; - } - - // Enables buttons when not drawing - else{ - // Turns run button into pause button while running - if (status == 'running') { - runButton.className = "btn btn-warning"; - runButton.childNodes[0].classList.add("fa-pause"); - runButton.childNodes[0].classList.remove("fa-play"); - runButton.title = dict['pauseBtnTooltip']; - } - - // Turns pause button into run button when not running - else { - runButton.className = "btn btn-success"; - runButton.childNodes[0].classList.remove("fa-pause"); - runButton.childNodes[0].classList.add("fa-play"); - runButton.title = dict['runBtnTooltip']; - } - - // Disables save button during intro tour - if(store.state.simulationState.intro_tour){ - saveEnvButton.className = "btn btn-primary mx-3 disabled"; - } - else{ - saveEnvButton.className = "btn btn-primary mx-3"; - saveEnvButton.title = dict['saveBtnTooltip']; - } - - resetButton.className = "btn btn-danger"; - resetButton.title = dict['resetBtnTooltip']; - } - - /* Initializes tooltips */ - this.element.querySelectorAll('[data-bs-toggle="tooltip"]').forEach((el, index) => { - return new bootstrap.Tooltip(el, { - trigger: 'hover' - }); - }); - } -}; \ No newline at end of file diff --git a/spaces/francojc/transcribe/README.md b/spaces/francojc/transcribe/README.md deleted file mode 100644 index d334dca4b097cb03a8f04ecf8e26fb8f11d863b5..0000000000000000000000000000000000000000 --- a/spaces/francojc/transcribe/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Transcribe -emoji: 🦀 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/frankio/goatheadrecordschatbot/README.md b/spaces/frankio/goatheadrecordschatbot/README.md deleted file mode 100644 index 129c579f26f6b5c127e8a7b14dd6d495d7102d09..0000000000000000000000000000000000000000 --- a/spaces/frankio/goatheadrecordschatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Goathead Records Gpt-3.5-turbo Using Langchain Sorta -emoji: 📚 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/base.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/base.py deleted file mode 100644 index 172fc63b736c4f13be1cd909433bc260760a1eaa..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/base.py +++ /dev/null @@ -1,273 +0,0 @@ -import logging -import warnings -from abc import ABCMeta, abstractmethod -from collections import OrderedDict - -import 
annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -from annotator.uniformer.mmcv.runner import auto_fp16 - - -class BaseSegmentor(nn.Module): - """Base class for segmentors.""" - - __metaclass__ = ABCMeta - - def __init__(self): - super(BaseSegmentor, self).__init__() - self.fp16_enabled = False - - @property - def with_neck(self): - """bool: whether the segmentor has neck""" - return hasattr(self, 'neck') and self.neck is not None - - @property - def with_auxiliary_head(self): - """bool: whether the segmentor has auxiliary head""" - return hasattr(self, - 'auxiliary_head') and self.auxiliary_head is not None - - @property - def with_decode_head(self): - """bool: whether the segmentor has decode head""" - return hasattr(self, 'decode_head') and self.decode_head is not None - - @abstractmethod - def extract_feat(self, imgs): - """Placeholder for extract features from images.""" - pass - - @abstractmethod - def encode_decode(self, img, img_metas): - """Placeholder for encode images with backbone and decode into a - semantic segmentation map of the same size as input.""" - pass - - @abstractmethod - def forward_train(self, imgs, img_metas, **kwargs): - """Placeholder for Forward function for training.""" - pass - - @abstractmethod - def simple_test(self, img, img_meta, **kwargs): - """Placeholder for single image test.""" - pass - - @abstractmethod - def aug_test(self, imgs, img_metas, **kwargs): - """Placeholder for augmentation test.""" - pass - - def init_weights(self, pretrained=None): - """Initialize the weights in segmentor. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if pretrained is not None: - logger = logging.getLogger() - logger.info(f'load model from: {pretrained}') - - def forward_test(self, imgs, img_metas, **kwargs): - """ - Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got ' - f'{type(var)}') - - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) != ' - f'num of image meta ({len(img_metas)})') - # all images in the same aug batch all of the same ori_shape and pad - # shape - for img_meta in img_metas: - ori_shapes = [_['ori_shape'] for _ in img_meta] - assert all(shape == ori_shapes[0] for shape in ori_shapes) - img_shapes = [_['img_shape'] for _ in img_meta] - assert all(shape == img_shapes[0] for shape in img_shapes) - pad_shapes = [_['pad_shape'] for _ in img_meta] - assert all(shape == pad_shapes[0] for shape in pad_shapes) - - if num_augs == 1: - return self.simple_test(imgs[0], img_metas[0], **kwargs) - else: - return self.aug_test(imgs, img_metas, **kwargs) - - @auto_fp16(apply_to=('img', )) - def forward(self, img, img_metas, return_loss=True, **kwargs): - """Calls either :func:`forward_train` or :func:`forward_test` depending - on whether ``return_loss`` is ``True``. - - Note this setting will change the expected inputs. When - ``return_loss=True``, img and img_meta are single-nested (i.e. 
Tensor - and List[dict]), and when ``resturn_loss=False``, img and img_meta - should be double nested (i.e. List[Tensor], List[List[dict]]), with - the outer list indicating test time augmentations. - """ - if return_loss: - return self.forward_train(img, img_metas, **kwargs) - else: - return self.forward_test(img, img_metas, **kwargs) - - def train_step(self, data_batch, optimizer, **kwargs): - """The iteration step during training. - - This method defines an iteration step during training, except for the - back propagation and optimizer updating, which are done in an optimizer - hook. Note that in some complicated cases or models, the whole process - including back propagation and optimizer updating is also defined in - this method, such as GAN. - - Args: - data (dict): The output of dataloader. - optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of - runner is passed to ``train_step()``. This argument is unused - and reserved. - - Returns: - dict: It should contain at least 3 keys: ``loss``, ``log_vars``, - ``num_samples``. - ``loss`` is a tensor for back propagation, which can be a - weighted sum of multiple losses. - ``log_vars`` contains all the variables to be sent to the - logger. - ``num_samples`` indicates the batch size (when the model is - DDP, it means the batch size on each GPU), which is used for - averaging the logs. - """ - losses = self(**data_batch) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, - log_vars=log_vars, - num_samples=len(data_batch['img_metas'])) - - return outputs - - def val_step(self, data_batch, **kwargs): - """The iteration step during validation. - - This method shares the same signature as :func:`train_step`, but used - during val epochs. Note that the evaluation after training epochs is - not implemented with this method, but an evaluation hook. - """ - output = self(**data_batch, **kwargs) - return output - - @staticmethod - def _parse_losses(losses): - """Parse the raw outputs (losses) of the network. - - Args: - losses (dict): Raw output of the network, which usually contain - losses and other necessary information. - - Returns: - tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor - which may be a weighted sum of all losses, log_vars contains - all the variables to be sent to the logger. - """ - log_vars = OrderedDict() - for loss_name, loss_value in losses.items(): - if isinstance(loss_value, torch.Tensor): - log_vars[loss_name] = loss_value.mean() - elif isinstance(loss_value, list): - log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) - else: - raise TypeError( - f'{loss_name} is not a tensor or list of tensors') - - loss = sum(_value for _key, _value in log_vars.items() - if 'loss' in _key) - - log_vars['loss'] = loss - for loss_name, loss_value in log_vars.items(): - # reduce loss when distributed training - if dist.is_available() and dist.is_initialized(): - loss_value = loss_value.data.clone() - dist.all_reduce(loss_value.div_(dist.get_world_size())) - log_vars[loss_name] = loss_value.item() - - return loss, log_vars - - def show_result(self, - img, - result, - palette=None, - win_name='', - show=False, - wait_time=0, - out_file=None, - opacity=0.5): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (Tensor): The semantic segmentation results to draw over - `img`. - palette (list[list[int]]] | np.ndarray | None): The palette of - segmentation map. If None is given, random palette will be - generated. 
Default: None - win_name (str): The window name. - wait_time (int): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - Returns: - img (Tensor): Only if not `show` or `out_file` - """ - img = mmcv.imread(img) - img = img.copy() - seg = result[0] - if palette is None: - if self.PALETTE is None: - palette = np.random.randint( - 0, 255, size=(len(self.CLASSES), 3)) - else: - palette = self.PALETTE - palette = np.array(palette) - assert palette.shape[0] == len(self.CLASSES) - assert palette.shape[1] == 3 - assert len(palette.shape) == 2 - assert 0 < opacity <= 1.0 - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - # convert to BGR - color_seg = color_seg[..., ::-1] - - img = img * (1 - opacity) + color_seg * opacity - img = img.astype(np.uint8) - # if out_file specified, do not show image in window - if out_file is not None: - show = False - - if show: - mmcv.imshow(img, win_name, wait_time) - if out_file is not None: - mmcv.imwrite(img, out_file) - - if not (show or out_file): - warnings.warn('show==False and out_file is not specified, only ' - 'result image will be returned') - return img diff --git a/spaces/giiift/expert_system/app.py b/spaces/giiift/expert_system/app.py deleted file mode 100644 index e1fb59b3fd5bfb5523ec66eee65f684e3cf97d84..0000000000000000000000000000000000000000 --- a/spaces/giiift/expert_system/app.py +++ /dev/null @@ -1,44 +0,0 @@ -from llama_index import LLMPredictor, GPTSimpleVectorIndex, PromptHelper, ServiceContext, SimpleDirectoryReader -from langchain.chat_models import ChatOpenAI -import gradio as gr -import sys -import os - - -os.environ["OPENAI_API_KEY"] - -def construct_index(directory_path): - max_input_size = 4096 - num_outputs = 512 - max_chunk_overlap = 20 - chunk_size_limit = 600 - - prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit) - - llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_outputs)) - - service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) - - documents = SimpleDirectoryReader(directory_path).load_data() - - index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context) - - index.save_to_disk('index.json') - - return index - -def chatbot(input_text): - index = GPTSimpleVectorIndex.load_from_disk('index.json') - prompt_1 = "You are a R&D engineer in electronic industry, answer the following question based on embeded internal file, if not find in internal file, ask the answer in your understanding," - response = index.query(prompt_1 + input_text, response_mode="default") - return response.response - - -iface = gr.Interface(fn=chatbot, - inputs=gr.components.Textbox(lines=7, label="Find the R&D files"), - outputs="text", - title="Your AI R&D Expert") - -# As the index has already been constructed, inactive this line to save embedding tokens -index = construct_index("docs") -iface.launch() \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Download Dishoom free hindi movie and laugh out loud with Jacqueline Fernandez and Nargis Fakhri.md b/spaces/gotiQspiryo/whisper-ui/examples/Download Dishoom free hindi movie 
and laugh out loud with Jacqueline Fernandez and Nargis Fakhri.md deleted file mode 100644 index 03f9cc828b499cecb58ea08719cf680dd9252303..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Download Dishoom free hindi movie and laugh out loud with Jacqueline Fernandez and Nargis Fakhri.md +++ /dev/null @@ -1,6 +0,0 @@
-Dishoom free download hindi
-DOWNLOAD ★★★ https://urlgoal.com/2uyLQh
-aaccfb2cb3
-
-
-
      diff --git a/spaces/gyugnsu/DragGan-Inversion/legacy.py b/spaces/gyugnsu/DragGan-Inversion/legacy.py deleted file mode 100644 index a874c38c2c943e632badb8e12f5a4297071827df..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/legacy.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Converting legacy network pickle into the new format.""" - -import click -import pickle -import re -import copy -import numpy as np -import torch -import dnnlib -from torch_utils import misc - -# ---------------------------------------------------------------------------- - - -def load_network_pkl(f, force_fp16=False): - data = _LegacyUnpickler(f).load() - - # Legacy TensorFlow pickle => convert. - if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - tf_G, tf_D, tf_Gs = data - G = convert_tf_generator(tf_G) - D = convert_tf_discriminator(tf_D) - G_ema = convert_tf_generator(tf_Gs) - data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G_ema'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
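- # With force_fp16 enabled, each of G/D/G_ema below is rebuilt with
- # num_fp16_res=4 and conv_clamp=256 patched into its (synthesis) init kwargs,
- # and the old parameters are copied into the new module via
- # misc.copy_params_and_buffers.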
- if force_fp16: - for key in ['G', 'D', 'G_ema']: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs) - fp16_kwargs.num_fp16_res = 4 - fp16_kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -# ---------------------------------------------------------------------------- - - -class _TFNetworkStub(dnnlib.EasyDict): - pass - - -class _LegacyUnpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'dnnlib.tflib.network' and name == 'Network': - return _TFNetworkStub - return super().find_class(module, name) - -# ---------------------------------------------------------------------------- - - -def _collect_tf_params(tf_net): - # pylint: disable=protected-access - tf_params = dict() - - def recurse(prefix, tf_net): - for name, value in tf_net.variables: - tf_params[prefix + name] = value - for name, comp in tf_net.components.items(): - recurse(prefix + name + '/', comp) - recurse('', tf_net) - return tf_params - -# ---------------------------------------------------------------------------- - - -def _populate_module_params(module, *patterns): - for name, tensor in misc.named_params_and_buffers(module): - found = False - value = None - for pattern, value_fn in zip(patterns[0::2], patterns[1::2]): - match = re.fullmatch(pattern, name) - if match: - found = True - if value_fn is not None: - value = value_fn(*match.groups()) - break - try: - assert found - if value is not None: - tensor.copy_(torch.from_numpy(np.array(value))) - except: - print(name, list(tensor.shape)) - raise - -# ---------------------------------------------------------------------------- - - -def convert_tf_generator(tf_G): - if tf_G.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_G.static_kwargs - known_kwargs = set() - - def kwarg(tf_name, default=None, none=None): - known_kwargs.add(tf_name) - val = tf_kwargs.get(tf_name, default) - return val if val is not None else none - - # Convert kwargs. - from training import networks_stylegan2 - network_class = networks_stylegan2.Generator - kwargs = dnnlib.EasyDict( - z_dim=kwarg('latent_size', 512), - c_dim=kwarg('label_size', 0), - w_dim=kwarg('dlatent_size', 512), - img_resolution=kwarg('resolution', 1024), - img_channels=kwarg('num_channels', 3), - channel_base=kwarg('fmap_base', 16384) * 2, - channel_max=kwarg('fmap_max', 512), - num_fp16_res=kwarg('num_fp16_res', 0), - conv_clamp=kwarg('conv_clamp', None), - architecture=kwarg('architecture', 'skip'), - resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]), - use_noise=kwarg('use_noise', True), - activation=kwarg('nonlinearity', 'lrelu'), - mapping_kwargs=dnnlib.EasyDict( - num_layers=kwarg('mapping_layers', 8), - embed_features=kwarg('label_fmaps', None), - layer_features=kwarg('mapping_fmaps', None), - activation=kwarg('mapping_nonlinearity', 'lrelu'), - lr_multiplier=kwarg('mapping_lrmul', 0.01), - w_avg_beta=kwarg('w_avg_beta', 0.995, none=1), - ), - ) - - # Check for unknown kwargs. - kwarg('truncation_psi') - kwarg('truncation_cutoff') - kwarg('style_mixing_prob') - kwarg('structure') - kwarg('conditioning') - kwarg('fused_modconv') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. 
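- # _collect_tf_params flattens every TF variable into a name -> array dict;
- # the loop below then renames progressive-growing ToRGB_lod{n} entries to
- # their resolution-keyed equivalents (lod 0 of a 1024px generator becomes
- # 1024x1024/ToRGB/...), matching the names the regex patterns expect.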
- tf_params = _collect_tf_params(tf_G) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value - kwargs.synthesis.kwargs.architecture = 'orig' - # for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. - G = network_class(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - # pylint: disable=f-string-without-interpolation - _populate_module_params(G, - r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'], - r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose( - ), - r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose( - ), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'], - r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0], - r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b4\.conv1\.bias', lambda: tf_params[ - f'synthesis/4x4/Conv/bias'], - r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[ - f'synthesis/noise0'][0, 0], - r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[ - f'synthesis/4x4/Conv/noise_strength'], - r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[ - f'synthesis/4x4/Conv/mod_weight'].transpose(), - r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[ - f'synthesis/4x4/Conv/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/bias'], - r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[ - f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0], - r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/noise_strength'], - r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/bias'], - r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[ - f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0], - r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/noise_strength'], - r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1, - r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose( - 3, 2, 0, 1), - r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/ToRGB/bias'], - r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose( - ), - r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[ - f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1, - r'synthesis\.b(\d+)\.skip\.weight', lambda r: 
tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose( - 3, 2, 0, 1), - r'.*\.resample_filter', None, - r'.*\.act_filter', None, - ) - return G - -# ---------------------------------------------------------------------------- - - -def convert_tf_discriminator(tf_D): - if tf_D.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_D.static_kwargs - known_kwargs = set() - - def kwarg(tf_name, default=None): - known_kwargs.add(tf_name) - return tf_kwargs.get(tf_name, default) - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - c_dim=kwarg('label_size', 0), - img_resolution=kwarg('resolution', 1024), - img_channels=kwarg('num_channels', 3), - architecture=kwarg('architecture', 'resnet'), - channel_base=kwarg('fmap_base', 16384) * 2, - channel_max=kwarg('fmap_max', 512), - num_fp16_res=kwarg('num_fp16_res', 0), - conv_clamp=kwarg('conv_clamp', None), - cmap_dim=kwarg('mapping_fmaps', None), - block_kwargs=dnnlib.EasyDict( - activation=kwarg('nonlinearity', 'lrelu'), - resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]), - freeze_layers=kwarg('freeze_layers', 0), - ), - mapping_kwargs=dnnlib.EasyDict( - num_layers=kwarg('mapping_layers', 0), - embed_features=kwarg('mapping_fmaps', None), - layer_features=kwarg('mapping_fmaps', None), - activation=kwarg('nonlinearity', 'lrelu'), - lr_multiplier=kwarg('mapping_lrmul', 0.1), - ), - epilogue_kwargs=dnnlib.EasyDict( - mbstd_group_size=kwarg('mbstd_group_size', None), - mbstd_num_channels=kwarg('mbstd_num_features', 1), - activation=kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. - kwarg('structure') - kwarg('conditioning') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_D) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value - kwargs.architecture = 'orig' - # for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. 
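- # As with the generator, the TF tensors are copied into the freshly built
- # PyTorch discriminator below; 4-D conv weights move from TF's HWIO layout
- # to PyTorch's OIHW via .transpose(3, 2, 0, 1), while dense and modulation
- # weights only need a plain transpose.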
- from training import networks_stylegan2 - D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - # pylint: disable=f-string-without-interpolation - _populate_module_params(D, - r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose( - 3, 2, 0, 1), - r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'], - r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose( - 3, 2, 0, 1), - r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[ - f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'], - r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose( - 3, 2, 0, 1), - r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose( - ), - r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose( - ), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'], - r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose( - 3, 2, 0, 1), - r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'], - r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose( - ), - r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'], - r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose( - ), - r'b4\.out\.bias', lambda: tf_params[f'Output/bias'], - r'.*\.resample_filter', None, - ) - return D - -# ---------------------------------------------------------------------------- - - -@click.command() -@click.option('--source', help='Input pickle', required=True, metavar='PATH') -@click.option('--dest', help='Output pickle', required=True, metavar='PATH') -@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True) -def convert_network_pickle(source, dest, force_fp16): - """Convert legacy network pickle into the native PyTorch format. - - The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. - It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. - - Example: - - \b - python legacy.py \\ - --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ - --dest=stylegan2-cat-config-f.pkl - """ - print(f'Loading "{source}"...') - with dnnlib.util.open_url(source) as f: - data = load_network_pkl(f, force_fp16=force_fp16) - print(f'Saving "{dest}"...') - with open(dest, 'wb') as f: - pickle.dump(data, f) - print('Done.') - -# ---------------------------------------------------------------------------- - - -if __name__ == "__main__": - convert_network_pickle() # pylint: disable=no-value-for-parameter - -# ---------------------------------------------------------------------------- diff --git a/spaces/h2oai/wave-tour/examples/ml_dai_instances.py b/spaces/h2oai/wave-tour/examples/ml_dai_instances.py deleted file mode 100644 index 20768824b03691ef0c856ce48812c3e437b6b1cf..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/ml_dai_instances.py +++ /dev/null @@ -1,93 +0,0 @@ -# WaveML / DAI / Instances -# List the Driverless AI instances of the user on Steam. -# --- -import os - -from h2o_wave import main, app, Q, ui -from h2o_wave_ml.utils import list_dai_instances - -STEAM_URL = os.environ.get('STEAM_URL') -STEAM_TEXT = f'''No Driverless AI instances available. 
You may create one in - AI Engines and refresh the page.''' - -ICON_MAP = { - 'created': 'Blocked2Solid', - 'starting': 'Blocked2Solid', - 'running': 'CompletedSolid', - 'unreachable': 'AlertSolid', - 'failed': 'AlertSolid', - 'stopping': 'Blocked2Solid', - 'stopped': 'Blocked2Solid', - 'terminating': 'Blocked2Solid', - 'terminated': 'Blocked2Solid' -} - - -def dai_instances_table(dai_instances: list): - # dai instances in ui.table - return ui.table( - name='table_dai', - columns=[ - ui.table_column(name='id', label='Id', min_width='50px', max_width='51px', link=False), - ui.table_column(name='name', label='Name', link=False), - ui.table_column(name='status', label='Status', cell_type=ui.icon_table_cell_type(color='#CDDD38'), - link=False), - ui.table_column(name='description', label='Description', link=False), - ui.table_column(name='version', label='Version', link=False) - ], - rows=[ - ui.table_row(str(i), [ - str(dai_instances[i]['id']), - dai_instances[i]['name'], - ICON_MAP[dai_instances[i]['status']], - dai_instances[i]['status'], - dai_instances[i]['version'] - ]) for i in range(len(dai_instances)) - ] - ) - - -def form_unsupported(): - # display when app is not running on cloud - return [ - ui.text('''This example requires access to Driverless AI running on - H2O AI Cloud - and does not support standalone app instances.'''), - ui.text('''Sign up at https://h2o.ai/free - to run apps on cloud.''') - ] - - -def form_default(q: Q): - # display when app is initialized - return [ - ui.label(label='List of Driverless AI instances'), - dai_instances_table(dai_instances=q.client.dai_instances) - ] - - -@app('/demo') -async def serve(q: Q): - if 'H2O_CLOUD_ENVIRONMENT' not in os.environ: - # show appropriate message if app is not running on cloud - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=form_unsupported() - ) - else: - # DAI instances - q.client.dai_instances = list_dai_instances(refresh_token=q.auth.refresh_token) - - # display ui - if q.client.dai_instances: - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=form_default(q) - ) - else: - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=[ui.text(content=STEAM_TEXT)] - ) - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/upload_async.py b/spaces/h2oai/wave-tour/examples/upload_async.py deleted file mode 100644 index b4e14cfd09d312581679dcacedf5f52b00622b3c..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/upload_async.py +++ /dev/null @@ -1,39 +0,0 @@ -# Uploads / Async -# Upload files from an interactive app. 
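- # Generates a CSV on the server, uploads it with q.site.upload(), and hands
- # the returned path back to the user as a download link.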
-# #upload -# --- - - -import os -from h2o_wave import main, app, Q, ui - - -def write_csv(filename, rows): - with open(filename, 'w', encoding='utf-8') as f: - f.write('\n'.join([','.join([str(x) for x in row]) for row in rows])) - - -@app('/demo') -async def serve(q: Q): - if q.args.generate_csv: - # Generate - write_csv('squares.csv', [[x, x * x] for x in range(1, 1 + q.args.row_count)]) - # Upload - download_path, = await q.site.upload(['squares.csv']) - # Clean up - os.remove('squares.csv') - - # Display link - q.page['example'].items = [ - ui.text_xl('Squares Generated!'), - ui.text(f'[Download my {q.args.row_count} squares!]({download_path})'), - ui.button(name='show_form', label='Back', primary=True), - ] - else: - # Accept a row count from the user - q.page['example'] = ui.form_card(box='1 1 4 7', items=[ - ui.text_xl('Square Generator'), - ui.slider(name='row_count', label='Squares to generate', min=0, max=100, step=10, value=30), - ui.button(name='generate_csv', label='Generate', primary=True), - ]) - await q.page.save() diff --git a/spaces/haakohu/deep_privacy2/configs/generators/stylegan_unet.py b/spaces/haakohu/deep_privacy2/configs/generators/stylegan_unet.py deleted file mode 100644 index 638859263a1cb549f533b75b2b19609665b3443e..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/configs/generators/stylegan_unet.py +++ /dev/null @@ -1,22 +0,0 @@ -from dp2.generator.stylegan_unet import StyleGANUnet -from tops.config import LazyCall as L - -generator = L(StyleGANUnet)( - imsize="${data.imsize}", - im_channels="${data.im_channels}", - min_fmap_resolution=8, - cnum=64, - max_cnum_mul=8, - n_middle_blocks=0, - z_channels=512, - mask_output=True, - conv_clamp=256, - input_cse=True, - scale_grad=True, - cse_nc="${data.cse_nc}", - w_dim=512, - n_keypoints="${data.n_keypoints}", - input_keypoints=False, - input_keypoint_indices=[], - fix_errors=True -) \ No newline at end of file diff --git a/spaces/hamelcubsfan/AutoGPT/autogpt/memory/local.py b/spaces/hamelcubsfan/AutoGPT/autogpt/memory/local.py deleted file mode 100644 index 803b6dc6ebb430285f423cda592fa3e902e9a4a6..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/autogpt/memory/local.py +++ /dev/null @@ -1,136 +0,0 @@ -from __future__ import annotations - -import dataclasses -import os -from typing import Any, List - -import numpy as np -import orjson - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.memory.base import MemoryProviderSingleton - -EMBED_DIM = 1536 -SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS - - -def create_default_embeddings(): - return np.zeros((0, EMBED_DIM)).astype(np.float32) - - -@dataclasses.dataclass -class CacheContent: - texts: List[str] = dataclasses.field(default_factory=list) - embeddings: np.ndarray = dataclasses.field( - default_factory=create_default_embeddings - ) - - -class LocalCache(MemoryProviderSingleton): - """A class that stores the memory in a local file""" - - def __init__(self, cfg) -> None: - """Initialize a class instance - - Args: - cfg: Config object - - Returns: - None - """ - self.filename = f"{cfg.memory_index}.json" - if os.path.exists(self.filename): - try: - with open(self.filename, "w+b") as f: - file_content = f.read() - if not file_content.strip(): - file_content = b"{}" - f.write(file_content) - - loaded = orjson.loads(file_content) - self.data = CacheContent(**loaded) - except orjson.JSONDecodeError: - print(f"Error: The file '{self.filename}' is not in JSON format.") - 
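- # Fall back to a fresh, empty cache when the JSON cannot be parsed. (Note
- # that the "w+b" open above truncates the file before reading, so
- # file_content is always empty here and the on-disk cache is effectively
- # reset at every startup.)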
self.data = CacheContent() - else: - print( - f"Warning: The file '{self.filename}' does not exist. " - "Local memory would not be saved to a file." - ) - self.data = CacheContent() - - def add(self, text: str): - """ - Add text to our list of texts, add embedding as row to our - embeddings-matrix - - Args: - text: str - - Returns: None - """ - if "Command Error:" in text: - return "" - self.data.texts.append(text) - - embedding = create_embedding_with_ada(text) - - vector = np.array(embedding).astype(np.float32) - vector = vector[np.newaxis, :] - self.data.embeddings = np.concatenate( - [ - self.data.embeddings, - vector, - ], - axis=0, - ) - - with open(self.filename, "wb") as f: - out = orjson.dumps(self.data, option=SAVE_OPTIONS) - f.write(out) - return text - - def clear(self) -> str: - """ - Clears the redis server. - - Returns: A message indicating that the memory has been cleared. - """ - self.data = CacheContent() - return "Obliviated" - - def get(self, data: str) -> list[Any] | None: - """ - Gets the data from the memory that is most relevant to the given data. - - Args: - data: The data to compare to. - - Returns: The most relevant data. - """ - return self.get_relevant(data, 1) - - def get_relevant(self, text: str, k: int) -> list[Any]: - """ " - matrix-vector mult to find score-for-each-row-of-matrix - get indices for top-k winning scores - return texts for those indices - Args: - text: str - k: int - - Returns: List[str] - """ - embedding = create_embedding_with_ada(text) - - scores = np.dot(self.data.embeddings, embedding) - - top_k_indices = np.argsort(scores)[-k:][::-1] - - return [self.data.texts[i] for i in top_k_indices] - - def get_stats(self) -> tuple[int, tuple[int, ...]]: - """ - Returns: The stats of the local cache. - """ - return len(self.data.texts), self.data.embeddings.shape diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/efficientnet.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/efficientnet.py deleted file mode 100644 index c7528a5e0ac0e83e2eaf18959b24aff0affa1f84..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/efficientnet.py +++ /dev/null @@ -1,691 +0,0 @@ -""" - EfficientNet for ImageNet-1K, implemented in PyTorch. - Original papers: - - 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946, - - 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665. -""" - -import os -import math -import torch -import torch.nn as nn -import torch.nn.functional as F - -from maskrcnn_benchmark.layers import SEBlock, swish - - -def round_channels(channels, - divisor=8): - """ - Round weighted channel number (make divisible operation). - - Parameters: - ---------- - channels : int or float - Original number of channels. - divisor : int, default 8 - Alignment value. - - Returns - ------- - int - Weighted number of channels. - """ - rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor) - if float(rounded_channels) < 0.9 * channels: - rounded_channels += divisor - return rounded_channels - - -def calc_tf_padding(x, - kernel_size, - stride=1, - dilation=1): - """ - Calculate TF-same like padding size. - - Parameters: - ---------- - x : tensor - Input tensor. - kernel_size : int - Convolution window size. - stride : int, default 1 - Strides of the convolution. - dilation : int, default 1 - Dilation value for convolution layer. 
- - Returns - ------- - tuple of 4 int - The size of the padding. - """ - height, width = x.size()[2:] - oh = math.ceil(height / stride) - ow = math.ceil(width / stride) - pad_h = max((oh - 1) * stride + (kernel_size - 1) * dilation + 1 - height, 0) - pad_w = max((ow - 1) * stride + (kernel_size - 1) * dilation + 1 - width, 0) - return pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2 - - -class ConvBlock(nn.Module): - """ - Standard convolution block with Batch normalization and activation. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - kernel_size : int or tuple/list of 2 int - Convolution window size. - stride : int or tuple/list of 2 int - Strides of the convolution. - padding : int, or tuple/list of 2 int, or tuple/list of 4 int - Padding value for convolution layer. - dilation : int or tuple/list of 2 int, default 1 - Dilation value for convolution layer. - groups : int, default 1 - Number of groups. - bias : bool, default False - Whether the layer uses a bias vector. - use_bn : bool, default True - Whether to use BatchNorm layer. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. - activation : function or str or None, default nn.ReLU(inplace=True) - Activation function or name of activation function. - """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation=1, - groups=1, - bias=False, - use_bn=True, - bn_eps=1e-5, - activation=nn.ReLU(inplace=True)): - super(ConvBlock, self).__init__() - self.activate = (activation is not None) - self.use_bn = use_bn - self.use_pad = (isinstance(padding, (list, tuple)) and (len(padding) == 4)) - - if self.use_pad: - self.pad = nn.ZeroPad2d(padding=padding) - padding = 0 - self.conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - if self.use_bn: - self.bn = nn.BatchNorm2d( - num_features=out_channels, - eps=bn_eps) - if self.activate: - self.activ = activation - - def forward(self, x): - if self.use_pad: - x = self.pad(x) - x = self.conv(x) - if self.use_bn: - x = self.bn(x) - if self.activate: - x = self.activ(x) - return x - - -def conv1x1_block(in_channels, - out_channels, - stride=1, - padding=0, - groups=1, - bias=False, - use_bn=True, - bn_eps=1e-5, - activation=nn.ReLU(inplace=True)): - """ - 1x1 version of the standard convolution block. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - stride : int or tuple/list of 2 int, default 1 - Strides of the convolution. - padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0 - Padding value for convolution layer. - groups : int, default 1 - Number of groups. - bias : bool, default False - Whether the layer uses a bias vector. - use_bn : bool, default True - Whether to use BatchNorm layer. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. - activation : function or str or None, default nn.ReLU(inplace=True) - Activation function or name of activation function. 
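- Example
- -------
- conv1x1_block(64, 128) yields Conv2d(64, 128, kernel_size=1, bias=False)
- followed by BatchNorm2d(128) and an inplace ReLU.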
- """ - return ConvBlock( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - stride=stride, - padding=padding, - groups=groups, - bias=bias, - use_bn=use_bn, - bn_eps=bn_eps, - activation=activation) - - -def conv3x3_block(in_channels, - out_channels, - stride=1, - padding=1, - dilation=1, - groups=1, - bias=False, - use_bn=True, - bn_eps=1e-5, - activation=nn.ReLU(inplace=True)): - """ - 3x3 version of the standard convolution block. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - stride : int or tuple/list of 2 int, default 1 - Strides of the convolution. - padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 - Padding value for convolution layer. - dilation : int or tuple/list of 2 int, default 1 - Dilation value for convolution layer. - groups : int, default 1 - Number of groups. - bias : bool, default False - Whether the layer uses a bias vector. - use_bn : bool, default True - Whether to use BatchNorm layer. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. - activation : function or str or None, default nn.ReLU(inplace=True) - Activation function or name of activation function. - """ - return ConvBlock( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias, - use_bn=use_bn, - bn_eps=bn_eps, - activation=activation) - - -def dwconv3x3_block(in_channels, - out_channels, - stride=1, - padding=1, - dilation=1, - bias=False, - bn_eps=1e-5, - activation=nn.ReLU(inplace=True)): - """ - 3x3 depthwise version of the standard convolution block. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - stride : int or tuple/list of 2 int, default 1 - Strides of the convolution. - padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1 - Padding value for convolution layer. - dilation : int or tuple/list of 2 int, default 1 - Dilation value for convolution layer. - bias : bool, default False - Whether the layer uses a bias vector. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. - activation : function or str or None, default nn.ReLU(inplace=True) - Activation function or name of activation function. - """ - return ConvBlock( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - padding=padding, - dilation=dilation, - groups=out_channels, - bias=bias, - use_bn=True, - bn_eps=bn_eps, - activation=activation) - - -def dwconv5x5_block(in_channels, - out_channels, - stride=1, - padding=2, - dilation=1, - bias=False, - bn_eps=1e-5, - activation=nn.ReLU(inplace=True)): - """ - 5x5 depthwise version of the standard convolution block. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - stride : int or tuple/list of 2 int, default 1 - Strides of the convolution. - padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2 - Padding value for convolution layer. - dilation : int or tuple/list of 2 int, default 1 - Dilation value for convolution layer. - bias : bool, default False - Whether the layer uses a bias vector. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. 
- activation : function or str or None, default nn.ReLU(inplace=True) - Activation function or name of activation function. - """ - return ConvBlock( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=5, - stride=stride, - padding=padding, - dilation=dilation, - groups=out_channels, - bias=bias, - use_bn=True, - bn_eps=bn_eps, - activation=activation) - - -class EffiDwsConvUnit(nn.Module): - """ - EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution - layers. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - stride : int or tuple/list of 2 int - Strides of the second convolution layer. - bn_eps : float - Small float added to variance in Batch norm. - activation : str - Name of activation function. - tf_mode : bool - Whether to use TF-like mode. - """ - def __init__(self, - in_channels, - out_channels, - stride, - bn_eps, - activation, - tf_mode): - super(EffiDwsConvUnit, self).__init__() - self.tf_mode = tf_mode - self.residual = (in_channels == out_channels) and (stride == 1) - - self.dw_conv = dwconv3x3_block( - in_channels=in_channels, - out_channels=in_channels, - padding=(0 if tf_mode else 1), - bn_eps=bn_eps, - activation=activation) - self.se = SEBlock( - channels=in_channels, - reduction=4, - mid_activation=activation) - self.pw_conv = conv1x1_block( - in_channels=in_channels, - out_channels=out_channels, - bn_eps=bn_eps, - activation=None) - - def forward(self, x): - if self.residual: - identity = x - if self.tf_mode: - x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3)) - x = self.dw_conv(x) - x = self.se(x) - x = self.pw_conv(x) - if self.residual: - x = x + identity - return x - - -class EffiInvResUnit(nn.Module): - """ - EfficientNet inverted residual unit. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - kernel_size : int or tuple/list of 2 int - Convolution window size. - stride : int or tuple/list of 2 int - Strides of the second convolution layer. - exp_factor : int - Factor for expansion of channels. - se_factor : int - SE reduction factor for each unit. - bn_eps : float - Small float added to variance in Batch norm. - activation : str - Name of activation function. - tf_mode : bool - Whether to use TF-like mode. 
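- Note that the unit expands channels by exp_factor with a 1x1 convolution,
- applies a depthwise 3x3 or 5x5 convolution (selected by kernel_size) plus
- optional squeeze-excitation, and projects back with a 1x1 convolution; the
- residual shortcut is used only when stride == 1 and the input and output
- channel counts match.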
- """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride, - exp_factor, - se_factor, - bn_eps, - activation, - tf_mode): - super(EffiInvResUnit, self).__init__() - self.kernel_size = kernel_size - self.stride = stride - self.tf_mode = tf_mode - self.residual = (in_channels == out_channels) and (stride == 1) - self.use_se = se_factor > 0 - mid_channels = in_channels * exp_factor - dwconv_block_fn = dwconv3x3_block if kernel_size == 3 else (dwconv5x5_block if kernel_size == 5 else None) - - self.conv1 = conv1x1_block( - in_channels=in_channels, - out_channels=mid_channels, - bn_eps=bn_eps, - activation=activation) - self.conv2 = dwconv_block_fn( - in_channels=mid_channels, - out_channels=mid_channels, - stride=stride, - padding=(0 if tf_mode else (kernel_size // 2)), - bn_eps=bn_eps, - activation=activation) - if self.use_se: - self.se = SEBlock( - channels=mid_channels, - reduction=(exp_factor * se_factor), - mid_activation=activation) - self.conv3 = conv1x1_block( - in_channels=mid_channels, - out_channels=out_channels, - bn_eps=bn_eps, - activation=None) - - def forward(self, x): - if self.residual: - identity = x - x = self.conv1(x) - if self.tf_mode: - x = F.pad(x, pad=calc_tf_padding(x, kernel_size=self.kernel_size, stride=self.stride)) - x = self.conv2(x) - if self.use_se: - x = self.se(x) - x = self.conv3(x) - if self.residual: - x = x + identity - return x - - -class EffiInitBlock(nn.Module): - """ - EfficientNet specific initial block. - - Parameters: - ---------- - in_channels : int - Number of input channels. - out_channels : int - Number of output channels. - bn_eps : float - Small float added to variance in Batch norm. - activation : str - Name of activation function. - tf_mode : bool - Whether to use TF-like mode. - """ - - def __init__(self, - in_channels, - out_channels, - bn_eps, - activation, - tf_mode): - super(EffiInitBlock, self).__init__() - self.tf_mode = tf_mode - - self.conv = conv3x3_block( - in_channels=in_channels, - out_channels=out_channels, - stride=2, - padding=(0 if tf_mode else 1), - bn_eps=bn_eps, - activation=activation) - - def forward(self, x): - if self.tf_mode: - x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3, stride=2)) - x = self.conv(x) - return x - - -class EfficientNet(nn.Module): - """ - EfficientNet model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' - https://arxiv.org/abs/1905.11946. - - Parameters: - ---------- - channels : list of list of int - Number of output channels for each unit. - init_block_channels : int - Number of output channels for initial unit. - final_block_channels : int - Number of output channels for the final block of the feature extractor. - kernel_sizes : list of list of int - Number of kernel sizes for each unit. - strides_per_stage : list int - Stride value for the first unit of each stage. - expansion_factors : list of list of int - Number of expansion factors for each unit. - dropout_rate : float, default 0.2 - Fraction of the input units to drop. Must be a number between 0 and 1. - tf_mode : bool, default False - Whether to use TF-like mode. - bn_eps : float, default 1e-5 - Small float added to variance in Batch norm. - in_channels : int, default 3 - Number of input channels. - in_size : tuple of two ints, default (224, 224) - Spatial size of the expected input image. - num_classes : int, default 1000 - Number of classification classes. 
- """ - def __init__(self, - cfg, - channels, - init_block_channels, - kernel_sizes, - strides_per_stage, - expansion_factors, - tf_mode=False, - bn_eps=1e-5, - in_channels=3): - super(EfficientNet, self).__init__() - activation = swish() - - self.out_channels = [] - self.features = nn.Sequential() - self.stages = [] - stem = EffiInitBlock( - in_channels=in_channels, - out_channels=init_block_channels, - bn_eps=bn_eps, - activation=activation, - tf_mode=tf_mode) - self.features.add_module("init_block", stem) - self.stages.append(stem) - - in_channels = init_block_channels - for i, channels_per_stage in enumerate(channels): - kernel_sizes_per_stage = kernel_sizes[i] - expansion_factors_per_stage = expansion_factors[i] - stage = nn.Sequential() - for j, out_channels in enumerate(channels_per_stage): - kernel_size = kernel_sizes_per_stage[j] - expansion_factor = expansion_factors_per_stage[j] - stride = strides_per_stage[i] if (j == 0) else 1 - if i == 0: - stage.add_module("unit{}".format(j + 1), EffiDwsConvUnit( - in_channels=in_channels, - out_channels=out_channels, - stride=stride, - bn_eps=bn_eps, - activation=activation, - tf_mode=tf_mode)) - else: - stage.add_module("unit{}".format(j + 1), EffiInvResUnit( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - exp_factor=expansion_factor, - se_factor=4, - bn_eps=bn_eps, - activation=activation, - tf_mode=tf_mode)) - in_channels = out_channels - if i>0: - self.out_channels.append(out_channels) - self.features.add_module("stage{}".format(i + 1), stage) - self.stages.append(stage) - # Optionally freeze (requires_grad=False) parts of the backbone - self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) - - def _freeze_backbone(self, freeze_at): - if freeze_at < 0: - return - for stage_index in range(freeze_at): - m = self.stages[stage_index] - for p in m.parameters(): - p.requires_grad = False - - def forward(self, x): - res = [] - for i, stage in enumerate(self.stages): - x = stage(x) - if i>1: - res.append(x) - return res - - -def get_efficientnet(cfg, version, tf_mode = True, bn_eps=1e-5, **kwargs): - if version == "b0": - depth_factor = 1.0 - width_factor = 1.0 - elif version == "b1": - depth_factor = 1.1 - width_factor = 1.0 - elif version == "b2": - depth_factor = 1.2 - width_factor = 1.1 - elif version == "b3": - depth_factor = 1.4 - width_factor = 1.2 - elif version == "b4": - depth_factor = 1.8 - width_factor = 1.4 - elif version == "b5": - depth_factor = 2.2 - width_factor = 1.6 - elif version == "b6": - depth_factor = 2.6 - width_factor = 1.8 - elif version == "b7": - depth_factor = 3.1 - width_factor = 2.0 - elif version == "b8": - depth_factor = 3.6 - width_factor = 2.2 - else: - raise ValueError("Unsupported EfficientNet version {}".format(version)) - - init_block_channels = 32 - layers = [1, 2, 2, 3, 3, 4, 1] - downsample = [1, 1, 1, 1, 0, 1, 0] - channels_per_layers = [16, 24, 40, 80, 112, 192, 320] - expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6] - kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3] - strides_per_stage = [1, 2, 2, 2, 1, 2, 1] - - layers = [int(math.ceil(li * depth_factor)) for li in layers] - channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] - - from functools import reduce - channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], - zip(channels_per_layers, layers, downsample), []) - kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + 
[x[-1] + [y[0]] * y[1]], - zip(kernel_sizes_per_layers, layers, downsample), []) - expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], - zip(expansion_factors_per_layers, layers, downsample), []) - strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], - zip(strides_per_stage, layers, downsample), []) - strides_per_stage = [si[0] for si in strides_per_stage] - - init_block_channels = round_channels(init_block_channels * width_factor) - - net = EfficientNet( - cfg, - channels=channels, - init_block_channels=init_block_channels, - kernel_sizes=kernel_sizes, - strides_per_stage=strides_per_stage, - expansion_factors=expansion_factors, - tf_mode=tf_mode, - bn_eps=bn_eps, - **kwargs) - - return net diff --git a/spaces/hardon-server/space-diffusion-txt2img-1-5/README.md b/spaces/hardon-server/space-diffusion-txt2img-1-5/README.md deleted file mode 100644 index 40165a4bcf2d1096b9b6ee14eae89f5ca77ba2c1..0000000000000000000000000000000000000000 --- a/spaces/hardon-server/space-diffusion-txt2img-1-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Space Diffusion Txt2img 1.5 -emoji: 👁 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hjzhp/cgpt-online/src/pages/api/auth.ts b/spaces/hjzhp/cgpt-online/src/pages/api/auth.ts deleted file mode 100644 index b0029e35e1a94b9b916846a0e25680f7915dbe4d..0000000000000000000000000000000000000000 --- a/spaces/hjzhp/cgpt-online/src/pages/api/auth.ts +++ /dev/null @@ -1,12 +0,0 @@ -import type { APIRoute } from 'astro' - -const realPassword = import.meta.env.SITE_PASSWORD - -export const post: APIRoute = async(context) => { - const body = await context.request.json() - - const { pass } = body - return new Response(JSON.stringify({ - code: (!realPassword || pass === realPassword) ? 0 : -1, - })) -} diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/evaluator.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/evaluator.py deleted file mode 100644 index 8f65631494319484b52a61f54d3346c76d9481d7..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/evaluation/evaluator.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
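A minimal usage sketch for the folder-level entry point defined near the bottom of this module; the paths and label set here are illustrative:

from nnunet.evaluation.evaluator import evaluate_folder

# Compare predicted niftis against ground-truth niftis file-by-file and write
# summary.json (per-case and mean metrics) into the prediction folder.
evaluate_folder('/data/gt_niftis', '/data/predictions', labels=(0, 1, 2))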
- - -import collections -import inspect -import json -import hashlib -from datetime import datetime -from multiprocessing.pool import Pool -import numpy as np -import pandas as pd -import SimpleITK as sitk -from nnunet.evaluation.metrics import ConfusionMatrix, ALL_METRICS -from batchgenerators.utilities.file_and_folder_operations import save_json, subfiles, join -from collections import OrderedDict - - -class Evaluator: - """Object that holds test and reference segmentations with label information - and computes a number of metrics on the two. 'labels' must either be an - iterable of numeric values (or tuples thereof) or a dictionary with string - names and numeric values. - """ - - default_metrics = [ - "False Positive Rate", - "Dice", - "Jaccard", - "Precision", - "Recall", - "Accuracy", - "False Omission Rate", - "Negative Predictive Value", - "False Negative Rate", - "True Negative Rate", - "False Discovery Rate", - "Total Positives Test", - "Total Positives Reference" - ] - - default_advanced_metrics = [ - # "Hausdorff Distance", - "Hausdorff Distance 95", - # "Avg. Surface Distance", - # "Avg. Symmetric Surface Distance" - ] - - def __init__(self, - test=None, - reference=None, - labels=None, - metrics=None, - advanced_metrics=None, - nan_for_nonexisting=True): - - self.test = None - self.reference = None - self.confusion_matrix = ConfusionMatrix() - self.labels = None - self.nan_for_nonexisting = nan_for_nonexisting - self.result = None - - self.metrics = [] - if metrics is None: - for m in self.default_metrics: - self.metrics.append(m) - else: - for m in metrics: - self.metrics.append(m) - - self.advanced_metrics = [] - if advanced_metrics is None: - for m in self.default_advanced_metrics: - self.advanced_metrics.append(m) - else: - for m in advanced_metrics: - self.advanced_metrics.append(m) - - self.set_reference(reference) - self.set_test(test) - if labels is not None: - self.set_labels(labels) - else: - if test is not None and reference is not None: - self.construct_labels() - - def set_test(self, test): - """Set the test segmentation.""" - - self.test = test - - def set_reference(self, reference): - """Set the reference segmentation.""" - - self.reference = reference - - def set_labels(self, labels): - """Set the labels. - :param labels= may be a dictionary (int->str), a set (of ints), a tuple (of ints) or a list (of ints). 
Labels - will only have names if you pass a dictionary""" - - if isinstance(labels, dict): - self.labels = collections.OrderedDict(labels) - elif isinstance(labels, set): - self.labels = list(labels) - elif isinstance(labels, np.ndarray): - self.labels = [i for i in labels] - elif isinstance(labels, (list, tuple)): - self.labels = labels - else: - raise TypeError( - "Can only handle dict, list, tuple, set & numpy array, but input is of type {}".format(type(labels))) - - def construct_labels(self): - """Construct label set from unique entries in segmentations.""" - - if self.test is None and self.reference is None: - raise ValueError("No test or reference segmentations.") - elif self.test is None: - labels = np.unique(self.reference) - else: - labels = np.union1d(np.unique(self.test), - np.unique(self.reference)) - self.labels = list(map(lambda x: int(x), labels)) - - def set_metrics(self, metrics): - """Set evaluation metrics""" - - if isinstance(metrics, set): - self.metrics = list(metrics) - elif isinstance(metrics, (list, tuple, np.ndarray)): - self.metrics = metrics - else: - raise TypeError( - "Can only handle list, tuple, set & numpy array, but input is of type {}".format(type(metrics))) - - def add_metric(self, metric): - - if metric not in self.metrics: - self.metrics.append(metric) - - def evaluate(self, test=None, reference=None, advanced=False, **metric_kwargs): - """Compute metrics for segmentations.""" - if test is not None: - self.set_test(test) - - if reference is not None: - self.set_reference(reference) - - if self.test is None or self.reference is None: - raise ValueError("Need both test and reference segmentations.") - - if self.labels is None: - self.construct_labels() - - self.metrics.sort() - - # get functions for evaluation - # somewhat convoluted, but allows users to define additonal metrics - # on the fly, e.g. 
inside an IPython console - _funcs = {m: ALL_METRICS[m] for m in self.metrics + self.advanced_metrics} - frames = inspect.getouterframes(inspect.currentframe()) - for metric in self.metrics: - for f in frames: - if metric in f[0].f_locals: - _funcs[metric] = f[0].f_locals[metric] - break - else: - if metric in _funcs: - continue - else: - raise NotImplementedError( - "Metric {} not implemented.".format(metric)) - - # get results - self.result = OrderedDict() - - eval_metrics = self.metrics - if advanced: - eval_metrics += self.advanced_metrics - - if isinstance(self.labels, dict): - - for label, name in self.labels.items(): - k = str(name) - self.result[k] = OrderedDict() - if not hasattr(label, "__iter__"): - self.confusion_matrix.set_test(self.test == label) - self.confusion_matrix.set_reference(self.reference == label) - else: - current_test = 0 - current_reference = 0 - for l in label: - current_test += (self.test == l) - current_reference += (self.reference == l) - self.confusion_matrix.set_test(current_test) - self.confusion_matrix.set_reference(current_reference) - for metric in eval_metrics: - self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix, - nan_for_nonexisting=self.nan_for_nonexisting, - **metric_kwargs) - - else: - for j, label in enumerate(self.labels): - self.result[j] = OrderedDict() - for i, l in enumerate(label): - # TODO add second label (Done) - k = str(l) - self.result[j][k] = OrderedDict() - self.confusion_matrix.set_test(self.test[j] == l) - self.confusion_matrix.set_reference(self.reference[j] == l) - for metric in eval_metrics: - self.result[j][k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix, - nan_for_nonexisting=self.nan_for_nonexisting, - **metric_kwargs) - - return self.result - - def to_dict(self): - - if self.result is None: - self.evaluate() - return self.result - - def to_array(self): - """Return result as numpy array (labels x metrics).""" - - if self.result is None: - self.evaluate - - result_metrics = sorted(self.result[list(self.result.keys())[0]].keys()) - - a = np.zeros((len(self.labels), len(result_metrics)), dtype=np.float32) - - if isinstance(self.labels, dict): - for i, label in enumerate(self.labels.keys()): - for j, metric in enumerate(result_metrics): - a[i][j] = self.result[self.labels[label]][metric] - else: - for i, label in enumerate(self.labels): - for j, metric in enumerate(result_metrics): - a[i][j] = self.result[label][metric] - - return a - - def to_pandas(self): - """Return result as pandas DataFrame.""" - - a = self.to_array() - - if isinstance(self.labels, dict): - labels = list(self.labels.values()) - else: - labels = self.labels - - result_metrics = sorted(self.result[list(self.result.keys())[0]].keys()) - - return pd.DataFrame(a, index=labels, columns=result_metrics) - - -class NiftiEvaluator(Evaluator): - - def __init__(self, *args, **kwargs): - - self.test_nifti = None - self.reference_nifti = None - super(NiftiEvaluator, self).__init__(*args, **kwargs) - - def set_test(self, test): - """Set the test segmentation.""" - - if test is not None: - # TODO test images has only zone prediction Look at the code where image is saved. 
(Done) - self.test_nifti = sitk.ReadImage(test) - super(NiftiEvaluator, self).set_test(sitk.GetArrayFromImage(self.test_nifti)) - else: - self.test_nifti = None - super(NiftiEvaluator, self).set_test(test) - - def set_reference(self, reference): - """Set the reference segmentation.""" - - if reference is not None: - self.reference_nifti = sitk.ReadImage(reference) - super(NiftiEvaluator, self).set_reference(sitk.GetArrayFromImage(self.reference_nifti)) - else: - self.reference_nifti = None - super(NiftiEvaluator, self).set_reference(reference) - - def evaluate(self, test=None, reference=None, voxel_spacing=None, **metric_kwargs): - - if voxel_spacing is None: - voxel_spacing = np.array(self.test_nifti.GetSpacing())[::-1] - metric_kwargs["voxel_spacing"] = voxel_spacing - - return super(NiftiEvaluator, self).evaluate(test, reference, **metric_kwargs) - - -def run_evaluation(args): - test, ref, evaluator, metric_kwargs = args - # evaluate - evaluator.set_test(test) - evaluator.set_reference(ref) - if evaluator.labels is None: - evaluator.construct_labels() - current_scores = evaluator.evaluate(**metric_kwargs) - if type(test) == str: - current_scores["test"] = test - if type(ref) == str: - current_scores["reference"] = ref - return current_scores - - -def aggregate_scores(test_ref_pairs, - evaluator=NiftiEvaluator, - labels=None, - nanmean=True, - json_output_file=None, - json_name="", - json_description="", - json_author="Fabian", - json_task="", - num_threads=2, - **metric_kwargs): - """ - test = predicted image - :param test_ref_pairs: - :param evaluator: - :param labels: must be a dict of int-> str or a list of int - :param nanmean: - :param json_output_file: - :param json_name: - :param json_description: - :param json_author: - :param json_task: - :param metric_kwargs: - :return: - """ - - if type(evaluator) == type: - evaluator = evaluator() - - if labels is not None: - evaluator.set_labels(labels) - - test = [i[0] for i in test_ref_pairs] - ref = [i[1] for i in test_ref_pairs] - # all_res= [run_evaluation((test[0], ref[0], evaluator, metric_kwargs))] - - p = Pool(num_threads) - all_res = p.map(run_evaluation, zip(test, ref, [evaluator] * len(ref), [metric_kwargs] * len(ref))) - p.close() - p.join() - - all_scores = OrderedDict() - - for mask in range(len(labels)): - all_scores[mask] = OrderedDict() - all_scores[mask]["all"] = [] - all_scores[mask]["mean"] = OrderedDict() - for i in range(len(all_res)): - all_scores[mask]["all"].append(all_res[i][mask]) - - # append score list for mean - for label, score_dict in all_res[i][mask].items(): - if label in ("test", "reference"): - continue - if label not in all_scores[mask]["mean"]: - all_scores[mask]["mean"][label] = OrderedDict() - for score, value in score_dict.items(): - if score not in all_scores[mask]["mean"][label]: - all_scores[mask]["mean"][label][score] = [] - all_scores[mask]["mean"][label][score].append(value) - - for label in all_scores[mask]["mean"]: - for score in all_scores[mask]["mean"][label]: - if nanmean: - all_scores[mask]["mean"][label][score] = float(np.nanmean(all_scores[mask]["mean"][label][score])) - else: - all_scores[mask]["mean"][label][score] = float(np.mean(all_scores[mask]["mean"][label][score])) - - # save to file if desired - # we create a hopefully unique id by hashing the entire output dictionary - if json_output_file is not None: - json_dict = OrderedDict() - json_dict["name"] = json_name - json_dict["description"] = json_description - timestamp = datetime.today() - json_dict["timestamp"] = 
str(timestamp) - json_dict["task"] = json_task - json_dict["author"] = json_author - json_dict["results"] = all_scores - json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12] - save_json(json_dict, json_output_file) - - return all_scores - - -def aggregate_scores_for_experiment(score_file, - labels=None, - metrics=Evaluator.default_metrics, - nanmean=True, - json_output_file=None, - json_name="", - json_description="", - json_author="Fabian", - json_task=""): - scores = np.load(score_file) - scores_mean = scores.mean(0) - if labels is None: - labels = list(map(str, range(scores.shape[1]))) - - results = [] - results_mean = OrderedDict() - for i in range(scores.shape[0]): - results.append(OrderedDict()) - for l, label in enumerate(labels): - results[-1][label] = OrderedDict() - results_mean[label] = OrderedDict() - for m, metric in enumerate(metrics): - results[-1][label][metric] = float(scores[i][l][m]) - results_mean[label][metric] = float(scores_mean[l][m]) - - json_dict = OrderedDict() - json_dict["name"] = json_name - json_dict["description"] = json_description - timestamp = datetime.today() - json_dict["timestamp"] = str(timestamp) - json_dict["task"] = json_task - json_dict["author"] = json_author - json_dict["results"] = {"all": results, "mean": results_mean} - json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12] - if json_output_file is not None: - json_output_file = open(json_output_file, "w") - json.dump(json_dict, json_output_file, indent=4, separators=(",", ": ")) - json_output_file.close() - - return json_dict - - -def evaluate_folder(folder_with_gts: str, folder_with_predictions: str, labels: tuple, **metric_kwargs): - """ - writes a summary.json to folder_with_predictions - :param folder_with_gts: folder where the ground truth segmentations are saved. Must be nifti files. - :param folder_with_predictions: folder where the predicted segmentations are saved. Must be nifti files. - :param labels: tuple of int with the labels in the dataset. For example (0, 1, 2, 3) for Task001_BrainTumour. - :return: - """ - files_gt = subfiles(folder_with_gts, suffix=".nii.gz", join=False) - files_pred = subfiles(folder_with_predictions, suffix=".nii.gz", join=False) - assert all([i in files_pred for i in files_gt]), "files missing in folder_with_predictions" - assert all([i in files_gt for i in files_pred]), "files missing in folder_with_gts" - test_ref_pairs = [(join(folder_with_predictions, i), join(folder_with_gts, i)) for i in files_pred] - res = aggregate_scores(test_ref_pairs, json_output_file=join(folder_with_predictions, "summary.json"), - num_threads=8, labels=labels, **metric_kwargs) - return res - - -def nnunet_evaluate_folder(): - import argparse - parser = argparse.ArgumentParser("Evaluates the segmentations located in the folder pred. Output of this script is " - "a json file. At the very bottom of the json file there is going to be a 'mean' " - "entry with average metrics across all cases") - parser.add_argument('-ref', required=True, type=str, help="Folder containing the reference segmentations in nifti " - "format.") - parser.add_argument('-pred', required=True, type=str, help="Folder containing the predicted segmentations in nifti " - "format. File names must match between the folders!") - parser.add_argument('-l', nargs='+', type=int, required=True, help="List of label IDs (integer values) that should " - "be evaluated. Best practice is to use all int " - "values present in the dataset, so for example " - "for LiTS the labels are 0: background, 1: " - "liver, 2: tumor. So this argument " - "should be -l 1 2. If you want, you can also " - "evaluate the background label (0), but in " - "this case that would not give any useful " - "information.") - args = parser.parse_args() - return evaluate_folder(args.ref, args.pred, args.l) diff --git a/spaces/hoshilumine/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/hoshilumine/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000 --- a/spaces/hoshilumine/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - Interpolate the F0 sequence over unvoiced frames and return a voiced/unvoiced mask - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # this copy may be unnecessary - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/hra/GPT4-makes-BabyAGI/app.py b/spaces/hra/GPT4-makes-BabyAGI/app.py deleted file mode 100644 index 
1802ade9d0a0e67381f053ec9156172a64701366..0000000000000000000000000000000000000000 --- a/spaces/hra/GPT4-makes-BabyAGI/app.py +++ /dev/null @@ -1,163 +0,0 @@ -import gradio as gr -import openai -import requests -import json -import os -from duckduckgo_search import ddg -import datetime -from datetime import datetime, date, time, timedelta - - -def search_duckduckgo(query): - - keywords = query - results = ddg(keywords, region='wt-wt', safesearch='Off', time='m') - filtered_results = [{"title": res["title"], "body": res["body"]} for res in results] - print(filtered_results) - return filtered_results - -def get_search_query(task): - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content": f"Given the task: {task}. Generate a concise search query with 1-3 keywords."} - ] - ) - search_query = response.choices[0]['message']['content'].strip() - print("Agent 2: ",search_query) - return search_query - -def summarize_search_result(task, search_result): - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content":f"Given the task '{task}' and the search result '{json.dumps(search_result)}', provide a summarized result."} - ] - ) - summary = response.choices[0]['message']['content'].strip() - return summary - -def agent_1(objective): - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content":f"Given the objective '{objective}', create a list of tasks that are closely related to the objective. If needed add specific key words from objective to the task sentence"} - ] - ) - tasks = response.choices[0]['message']['content'].strip().split('\n') - return tasks - -def agent_2(task): - - search_query = get_search_query(task) - print("Agent 2") - print(search_query) - search_results = search_duckduckgo(search_query) - summarized_result = summarize_search_result(task, search_results) - print(summarized_result) - return summarized_result - -def agent_3(objective, last_result, tasks): - - task_list = '\n'.join(tasks) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content":f"Given the objective '{objective}', the last result '{json.dumps(last_result)}', and the task list:\n{task_list}\n\nRank the tasks based on their relevance to the objective."} - ] - ) - modified_tasks = response.choices[0]['message']['content'].strip().split('\n') - print("Agent 3") - print(modified_tasks) - return modified_tasks - -def summarize_result(objective, result): - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content":f"Given the objective '{objective}' and the final result '{json.dumps(result)}', provide a summary."} - ] - ) - summary = response.choices[0]['message']['content'].strip() - return summary - -def generate_final_answer(objective, all_results): - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content":f"Given the objective '{objective}' and the collected results '{json.dumps(all_results)}', provide a final answer addressing the objective."} - ] - ) - final_answer = response.choices[0]['message']['content'].strip() - return final_answer - -def main(objective, loop_count): - tasks = agent_1(objective) - all_results = [] - completed_tasks = [] - for i in range(loop_count): - print(i+1) - if i < len(tasks): - print('NEXT TASK: ',tasks[i]) - completed_tasks.append(tasks[i]) - result = agent_2(tasks[i]) - 
all_results.append(result) - tasks = agent_3(objective, result, tasks) - print('*********************') - else: - break - - final_answer = generate_final_answer(objective, all_results) - return final_answer,completed_tasks,tasks - -def getbabyagianswer(objective,loop_count,openapikey): - dateforfilesave=datetime.today().strftime("%d-%m-%Y %I:%M%p") - print(objective) - print(dateforfilesave) - - if openapikey=='': - return ["Please provide an OpenAI API key","Please provide an OpenAI API key","Please provide an OpenAI API key"] - - os.environ['OPENAI_API_KEY'] = str(openapikey) - openai.api_key=str(openapikey) - - final_summary,completed_tasks,all_tasks = main(objective, int(loop_count)) - print("Final Summary:", final_summary) - # all_tasks feeds the 'Full Task List' output and completed_tasks the 'Completed Tasks' output below - return final_summary,all_tasks,completed_tasks - - -with gr.Blocks() as demo: - gr.Markdown("GPT4 created BabyAGI") - gr.Markdown( - """ This is part of a series of experiments using BabyAGI as a "framework" to construct focused use cases (ex: idea generation). In this one, GPT-4 was prompted to create a BabyAGI with task creation & execution agents, but with constraints to give an answer within a specified number of loops. Unlike the original BabyAGI concept, this is not open-ended. \n\nNote: This is a series of experiments to understand AI agents, so do check the quality of the output. OpenAI agents (gpt-3.5-turbo) & DuckDuckGo search are used. The analysis takes roughly 120 secs & may not always be consistent. An error occurs when the OpenAI API key is not provided / the ChatGPT API is overloaded / ChatGPT is unable to correctly decipher & format the output\n\n""" - ) - - with gr.Row() as row: - with gr.Column(): - textboxtopic = gr.Textbox(placeholder="Enter Objective/ Goal...", lines=1,label='Objective') - with gr.Column(): - textboxloopcount = gr.Textbox(placeholder="Enter # of loops...", lines=1,label='Loop Count') - with gr.Column(): - textboxopenapi = gr.Textbox(placeholder="Enter OpenAI API Key...", lines=1,label='OpenAI API Key') - with gr.Row() as row: - examples = gr.Examples(examples=['Give me a startup idea in healthcare technology for India','Which is a must-see destination in Mysore?','Find me a unique cuisine restaurant in Bangalore','Give me a startup idea for AI in music streaming'], - inputs=[textboxtopic]) - with gr.Row() as row: - btn = gr.Button("Unleash AI Agent") - - with gr.Row() as row: - with gr.Column(): - answer1 = gr.Textbox(placeholder="", lines=1,label='Answer') - with gr.Column(): - fulltasklist1 = gr.Textbox(placeholder="", lines=1,label='Full Task List') - with gr.Column(): - completedtasklist1 = gr.Textbox(placeholder="", lines=1,label='Completed Tasks') - - btn.click(getbabyagianswer, inputs=[textboxtopic,textboxloopcount,textboxopenapi,],outputs=[answer1,fulltasklist1,completedtasklist1]) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/huaiji3y/bingo-Public/src/components/ui/dialog.tsx b/spaces/huaiji3y/bingo-Public/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' - -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( - 
      - {children} -
      -
      -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/huggan/butterfly-gan/assets/code_snippets/latent_walk.py b/spaces/huggan/butterfly-gan/assets/code_snippets/latent_walk.py deleted file mode 100644 index 0568771e51843db0d38f8987b98a28332f32ddea..0000000000000000000000000000000000000000 --- a/spaces/huggan/butterfly-gan/assets/code_snippets/latent_walk.py +++ /dev/null @@ -1,15 +0,0 @@ -# Some parameters -n_points = 6 #@param -n_steps = 300 #@param -latents = torch.randn(n_points, 256) - -# Loop through generating the frames -frames = [] -for i in tqdm(range(n_steps)): - p1 = max(0, int(n_points*i/n_steps)) - p2 = min(n_points, int(n_points*i/n_steps)+1)%n_points # so it wraps back to 0 - frac = (i-(p1*(n_steps/n_points))) / (n_steps/n_points) - l = latents[p1]*(1-frac) + latents[p2]*frac - im = model.G(l.unsqueeze(0)).clamp_(0., 1.) - frame=(im[0].permute(1, 2, 0).detach().cpu().numpy()*255).astype(np.uint8) - frames.append(frame) \ No newline at end of file diff --git a/spaces/huggingchat/chat-ui/src/lib/utils/streamToAsyncIterable.ts b/spaces/huggingchat/chat-ui/src/lib/utils/streamToAsyncIterable.ts deleted file mode 100644 index e935d719c8c29eb5e4efc30812f61b5f44716923..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/lib/utils/streamToAsyncIterable.ts +++ /dev/null @@ -1,15 +0,0 @@ -// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#iterating_over_async_generators -export async function* streamToAsyncIterable( - stream: ReadableStream -): AsyncIterableIterator { - const reader = stream.getReader(); - try { - while (true) { - const { done, value } = await reader.read(); - if (done) return; - yield value; - } - } finally { - reader.releaseLock(); - } -} diff --git a/spaces/huggingface-projects/llama-2-7b-chat/app.py b/spaces/huggingface-projects/llama-2-7b-chat/app.py deleted file mode 100644 index 3f888fab8974f5b25ecc069fc8f12277244330c9..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/llama-2-7b-chat/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import os -from threading import Thread -from typing import Iterator - -import gradio as gr -import spaces -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer - -MAX_MAX_NEW_TOKENS = 2048 -DEFAULT_MAX_NEW_TOKENS = 1024 -MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096")) - -DESCRIPTION = """\ -# Llama-2 7B Chat - -This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints). 
- -🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2). - -🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI). -""" - -LICENSE = """ -

- ---- -As a derivative work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, -this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md). -""" - -if not torch.cuda.is_available(): - DESCRIPTION += "\n

      Running on CPU 🥶 This demo does not work on CPU.

      " - - -if torch.cuda.is_available(): - model_id = "meta-llama/Llama-2-7b-chat-hf" - model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto") - tokenizer = AutoTokenizer.from_pretrained(model_id) - tokenizer.use_default_system_prompt = False - - -@spaces.GPU -def generate( - message: str, - chat_history: list[tuple[str, str]], - system_prompt: str, - max_new_tokens: int = 1024, - temperature: float = 0.6, - top_p: float = 0.9, - top_k: int = 50, - repetition_penalty: float = 1.2, -) -> Iterator[str]: - conversation = [] - if system_prompt: - conversation.append({"role": "system", "content": system_prompt}) - for user, assistant in chat_history: - conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]) - conversation.append({"role": "user", "content": message}) - - input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt") - if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH: - input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:] - gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.") - input_ids = input_ids.to(model.device) - - streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True) - generate_kwargs = dict( - {"input_ids": input_ids}, - streamer=streamer, - max_new_tokens=max_new_tokens, - do_sample=True, - top_p=top_p, - top_k=top_k, - temperature=temperature, - num_beams=1, - repetition_penalty=repetition_penalty, - ) - t = Thread(target=model.generate, kwargs=generate_kwargs) - t.start() - - outputs = [] - for text in streamer: - outputs.append(text) - yield "".join(outputs) - - -chat_interface = gr.ChatInterface( - fn=generate, - additional_inputs=[ - gr.Textbox(label="System prompt", lines=6), - gr.Slider( - label="Max new tokens", - minimum=1, - maximum=MAX_MAX_NEW_TOKENS, - step=1, - value=DEFAULT_MAX_NEW_TOKENS, - ), - gr.Slider( - label="Temperature", - minimum=0.1, - maximum=4.0, - step=0.1, - value=0.6, - ), - gr.Slider( - label="Top-p (nucleus sampling)", - minimum=0.05, - maximum=1.0, - step=0.05, - value=0.9, - ), - gr.Slider( - label="Top-k", - minimum=1, - maximum=1000, - step=1, - value=50, - ), - gr.Slider( - label="Repetition penalty", - minimum=1.0, - maximum=2.0, - step=0.05, - value=1.2, - ), - ], - stop_btn=None, - examples=[ - ["Hello there! 
How are you doing?"], - ["Can you explain briefly to me what is the Python programming language?"], - ["Explain the plot of Cinderella in a sentence."], - ["How many hours does it take a man to eat a Helicopter?"], - ["Write a 100-word article on 'Benefits of Open-Source in AI research'"], - ], -) - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button") - chat_interface.render() - gr.Markdown(LICENSE) - -if __name__ == "__main__": - demo.queue(max_size=20).launch() diff --git a/spaces/huolongguo10/HlgBot/README.md b/spaces/huolongguo10/HlgBot/README.md deleted file mode 100644 index 82c3568a1b3eff755e79d9388e6c9e3269cf2e69..0000000000000000000000000000000000000000 --- a/spaces/huolongguo10/HlgBot/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatYuan Large V2 -emoji: 📊 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m -duplicated_from: ClueAI/ChatYuan-large-v2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hussain-shk/IndiSent/legacy/apply_bpe_train_notag.sh b/spaces/hussain-shk/IndiSent/legacy/apply_bpe_train_notag.sh deleted file mode 100644 index fa24a57dc2a8b26eed1aae66793f9a65c2712e26..0000000000000000000000000000000000000000 --- a/spaces/hussain-shk/IndiSent/legacy/apply_bpe_train_notag.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -expdir=$1 # EXPDIR - -#`dirname $0`/env.sh -SUBWORD_NMT_DIR="subword-nmt" - -data_dir="$expdir/data" -train_file=$data_dir/train -bpe_file=$expdir/bpe/train/train - -mkdir -p $expdir/bpe/train - -echo "Apply to SRC corpus" - -python $SUBWORD_NMT_DIR/subword_nmt/apply_bpe.py \ - -c $expdir/vocab/bpe_codes.32k.SRC_TGT \ - --vocabulary $expdir/vocab/vocab.SRC \ - --vocabulary-threshold 5 \ - --num-workers "-1" \ - < $train_file.SRC \ - > $bpe_file.SRC - -echo "Apply to TGT corpus" - -python $SUBWORD_NMT_DIR/subword_nmt/apply_bpe.py \ - -c $expdir/vocab/bpe_codes.32k.SRC_TGT \ - --vocabulary $expdir/vocab/vocab.TGT \ - --vocabulary-threshold 5 \ - --num-workers "-1" \ - < $train_file.TGT \ - > $bpe_file.TGT - diff --git a/spaces/huy-ha/semabs-relevancy/app.py b/spaces/huy-ha/semabs-relevancy/app.py deleted file mode 100644 index a4aa8886175075445df9310e7c98dfe325c6b010..0000000000000000000000000000000000000000 --- a/spaces/huy-ha/semabs-relevancy/app.py +++ /dev/null @@ -1,165 +0,0 @@ -import gradio as gr -import numpy as np -from CLIP.clip import ClipWrapper, saliency_configs -from time import time -from matplotlib import pyplot as plt -import io -from PIL import Image, ImageDraw, ImageFont -import matplotlib - -matplotlib.use("Agg") - -tag = """ - -""" - - -def plot_to_png(fig): - buf = io.BytesIO() - plt.savefig(buf, format="png") - buf.seek(0) - img = np.array(Image.open(buf)).astype(np.uint8) - return img - - -def add_text_to_image( - image: np.ndarray, - text, - position, - color="rgb(255, 255, 255)", - fontsize=60, -): - image = Image.fromarray(image) - draw = ImageDraw.Draw(image) - draw.text( - position, - text, - fill=color, - font=ImageFont.truetype( - "/usr/share/fonts/truetype/lato/Lato-Medium.ttf", fontsize - ), - ) - return np.array(image) - - -def generate_relevancy( - img: np.array, labels: str, prompt: str, saliency_config: str, subtract_mean: bool -): - labels = labels.split(",") - if len(labels) > 32: - labels = labels[:32] - prompts = [prompt] - resize_shape = 
np.array(img.shape[:2]) - resize_shape = tuple( - ((resize_shape / resize_shape.max()) * 224 * 4).astype(int).tolist() - ) - img = np.asarray(Image.fromarray(img).resize(resize_shape)) - assert img.dtype == np.uint8 - h, w, c = img.shape - start = time() - try: - grads = ClipWrapper.get_clip_saliency( - img=img, - text_labels=np.array(labels), - prompts=prompts, - **saliency_configs[saliency_config](h), - )[0] - except Exception as e: - print(e) - return ( - [img], - tag, - ) - print("inference took", float(time() - start)) - if subtract_mean: - grads -= grads.mean(axis=0) - grads = grads.cpu().numpy() - vmin = 0.002 - cmap = plt.get_cmap("jet") - vmax = 0.008 - - returns = [] - for label_grad, label in zip(grads, labels): - fig, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax.axis("off") - ax.imshow(img) - grad = np.clip((label_grad - vmin) / (vmax - vmin), a_min=0.0, a_max=1.0) - colored_grad = cmap(grad) - grad = 1 - grad - colored_grad[..., -1] = grad * 0.7 - colored_grad = add_text_to_image( - (colored_grad * 255).astype(np.uint8), text=label, position=(0, 0) - ) - colored_grad = colored_grad.astype(float) / 255 - ax.imshow(colored_grad) - plt.tight_layout(pad=0) - returns.append(plot_to_png(fig)) - plt.close(fig) - return ( - returns, - tag, - ) - - -iface = gr.Interface( - title="Semantic Abstraction Multi-scale Relevancy Extractor", - description="""A CPU-only demo of [Semantic Abstraction](https://semantic-abstraction.cs.columbia.edu/)'s Multi-Scale Relevancy Extractor. To run GPU inference locally, use the [official codebase release](https://github.com/columbia-ai-robotics/semantic-abstraction). - -This relevancy extractor builds heavily on [Chefer et al.'s codebase](https://github.com/hila-chefer/Transformer-MM-Explainability) and [CLIP on Wheels' codebase](https://cow.cs.columbia.edu/).""", - fn=generate_relevancy, - cache_examples=True, - inputs=[ - gr.Image(type="numpy", label="Image"), - gr.Textbox(label="Labels (comma separated without spaces in between)"), - gr.Textbox( - label="Prompt. 
(Make sure to include '{}' in the prompt like examples below)" - ), - gr.Dropdown( - value="ours", - choices=["ours", "ours_fast", "chefer_et_al"], - label="Relevancy Configuration", - ), - gr.Checkbox(value=True, label="subtract mean"), - ], - outputs=[ - gr.Gallery(label="Relevancy Maps", type="numpy"), - gr.HTML(value=tag), - ], - examples=[ - [ - "https://semantic-abstraction.cs.columbia.edu/downloads/gameroom.png", - "basketball jersey,nintendo switch,television,ping pong table,vase,fireplace,abstract painting of a vespa,carpet,wall", - "a photograph of a {} in a home.", - "ours_fast", - True, - ], - [ - "https://semantic-abstraction.cs.columbia.edu/downloads/livingroom.png", - "monopoly boardgame set,door knob,sofa,coffee table,plant,carpet,wall", - "a photograph of a {} in a home.", - "ours_fast", - True, - ], - [ - "https://semantic-abstraction.cs.columbia.edu/downloads/fireplace.png", - "fireplace,beige armchair,candle,large indoor plant in a pot,forest painting,cheetah-patterned pillow,floor,carpet,wall", - "a photograph of a {} in a home.", - "ours_fast", - True, - ], - [ - "https://semantic-abstraction.cs.columbia.edu/downloads/walle.png", - "WALL-E,a fire extinguisher", - "a 3D render of {}.", - "ours_fast", - True, - ], - ], -) -iface.launch() diff --git a/spaces/hysts/Text2Human/style.css b/spaces/hysts/Text2Human/style.css deleted file mode 100644 index 22ad0be91ed35841bc456be4a0044474affc9a17..0000000000000000000000000000000000000000 --- a/spaces/hysts/Text2Human/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} -#input-image { - max-height: 300px; -} -#label-image { - height: 300px; -} -#result-image { - height: 300px; -} -img#visitor-badge { - display: block; - margin: auto; -} diff --git a/spaces/imseldrith/DeepFakeAI/DeepFakeAI/processors/frame/__init__.py b/spaces/imseldrith/DeepFakeAI/DeepFakeAI/processors/frame/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/inamXcontru/PoeticTTS/Astm D1143.pdfl.md b/spaces/inamXcontru/PoeticTTS/Astm D1143.pdfl.md deleted file mode 100644 index 5f88724d6b3ed261a92d333c3900a4b69b51135a..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Astm D1143.pdfl.md +++ /dev/null @@ -1,6 +0,0 @@ -

Astm D1143.pdfl

Download Zip: https://gohhs.com/2uz49r

aaccfb2cb3

      diff --git a/spaces/innat/Google-MediaPipe/holistic.py b/spaces/innat/Google-MediaPipe/holistic.py deleted file mode 100644 index 1941b16533f832cad0c9d727f15c37594a61038c..0000000000000000000000000000000000000000 --- a/spaces/innat/Google-MediaPipe/holistic.py +++ /dev/null @@ -1,35 +0,0 @@ -import mediapipe as mp -from utils import read_n_resize - -def mp_holistic_fn(image, min_detect_conf=0.5): - mp_drawing = mp.solutions.drawing_utils - mp_drawing_styles = mp.solutions.drawing_styles - mp_holistic = mp.solutions.holistic - - with mp_holistic.Holistic( - static_image_mode=True, - model_complexity=2, - enable_segmentation=True, - refine_face_landmarks=True, - min_detection_confidence=min_detect_conf - ) as holistic: - image = read_n_resize(image, read=False) - results = holistic.process(image) - annotated_image = image.copy() - - mp_drawing.draw_landmarks( - annotated_image, - results.face_landmarks, - mp_holistic.FACEMESH_TESSELATION, - landmark_drawing_spec=None, - connection_drawing_spec=mp_drawing_styles - .get_default_face_mesh_tesselation_style()) - mp_drawing.draw_landmarks( - annotated_image, - results.pose_landmarks, - mp_holistic.POSE_CONNECTIONS, - landmark_drawing_spec=mp_drawing_styles. - get_default_pose_landmarks_style()) - - return annotated_image - diff --git a/spaces/innnky/soft-vits-vc/app.py b/spaces/innnky/soft-vits-vc/app.py deleted file mode 100644 index 457c1cf0bc31b16dfe99d6b6106b9a36e6ff2d08..0000000000000000000000000000000000000000 --- a/spaces/innnky/soft-vits-vc/app.py +++ /dev/null @@ -1,81 +0,0 @@ -import gradio as gr -import os -os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..') - -import logging - -numba_logger = logging.getLogger('numba') -numba_logger.setLevel(logging.WARNING) - -import librosa -import torch - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import text_to_sequence - - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - print(text_norm.shape) - return text_norm - - -hps = utils.get_hparams_from_file("configs/ljs_base.json") - -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) -import numpy as np - -hubert = torch.hub.load("bshall/hubert:main", "hubert_soft") - -_ = utils.load_checkpoint("G_88000.pth", net_g, None) - -def vc_fn(input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - # print(audio.shape,sampling_rate) - duration = audio.shape[0] / sampling_rate - if duration > 30: - return "Error: Audio is too long", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - source = torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0) - print(source.shape) - with torch.inference_mode(): - units = hubert.units(source) - - stn_tst = torch.FloatTensor(units.squeeze(0)) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) - audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=0.1, noise_scale_w=0.1, length_scale=1)[0][ - 0, 0].data.float().numpy() - - return "Success", (hps.data.sampling_rate, audio) - 
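For reference, the conversion path in vc_fn above (normalize, resample to 16 kHz, encode to HuBERT soft units, vocode with the VITS synthesizer) can be exercised without the Gradio UI. The following is a minimal sketch under the assumptions of this file, i.e. that hubert, net_g and hps are already initialized as above; the "input.wav" path is a hypothetical example.

# Minimal offline sketch of the vc_fn pipeline above. Assumes `hubert`,
# `net_g` and `hps` are initialized as earlier in this file; "input.wav"
# is a hypothetical example path.
import librosa
import torch

wav, _ = librosa.load("input.wav", sr=16000, mono=True)    # mono float32 at the 16 kHz HuBERT rate
source = torch.FloatTensor(wav).unsqueeze(0).unsqueeze(0)  # shape (1, 1, T), as hubert.units expects
with torch.inference_mode():
    units = hubert.units(source)                           # soft speech units per frame
stn_tst = torch.FloatTensor(units.squeeze(0))
with torch.no_grad():
    x_tst = stn_tst.unsqueeze(0)
    x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
    audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=0.1,
                        noise_scale_w=0.1, length_scale=1)[0][0, 0].data.float().numpy()
# `audio` is a float waveform at hps.data.sampling_rate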
- - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("Basic"): - vc_input3 = gr.Audio(label="Input Audio (30s limitation)") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_submit.click(vc_fn, [ vc_input3], [vc_output1, vc_output2]) - - app.launch() \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/2 Stroke Wizard Tuned Pipe Pro V4rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/2 Stroke Wizard Tuned Pipe Pro V4rar.md deleted file mode 100644 index 499a6ace20f62918967f12eccb3acd9130a6e56d..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/2 Stroke Wizard Tuned Pipe Pro V4rar.md +++ /dev/null @@ -1,7 +0,0 @@ - -

Komaki Explore 01 1119P0818 1.3.3.0.9868 Full Version Price Free Downloads 35653 Novell Linux Advanced Server SP3 31 Logitracev12crackgratuitmega more.... Download 2 Stroke Wizard Tuned Pipe Pro V4rar download windows 10 - myhacker

2 Stroke Wizard Tuned Pipe Pro V4rar

Download File ✸✸✸ https://urlin.us/2uEweD

      Wav2MP3 Wizard 7.0.0 is compatible with Windows operating systems, such as Windows 10, 8, 7 and Vista, and is available for download in English, French, German and Spanish, the latter being the version used for this review.
      Users wishing to download the software can find the download link in the table above and have no trouble installing the platform (please refer to the user guide available for installation and configuration recommendations).
      The software can be bought at 9

      -

If you are interested in these topics, make sure to visit the web page at programmablepdf.com, the leading platform for online financial trading. Read the 1 rar The recordingist is the person responsible for the recording of all the instruments used on the current recording. 2 Stroke Wizard Tuned Pipe Pro V4rar are hard at work getting the first version of the Solar Kingdom simulator ready for release. It's probably a good time to stop by our launch page and stay tuned for an announcement regarding the release date, although as you can tell we've already spilled the beans. In the meantime, feel free to drop us a line with any questions you have and we will do our best to answer them. List of all the files you need to save to be able to get back the available content. Some of these websites may ask you to create an account and log in, but it is entirely optional; to obtain the best results it is recommended to leave your email with the website and allow us to inform you when you have mail from the end product, as you need to complete the registration process. These websites are then able to communicate with the servers of the final content, as the user is identified by a unique code.
      The installation process is relatively easy, and within seconds after you start the setup wizard you are ready to use MagicMouse. MagicMouse needs to be installed on the PC, and the Magic parts do not require any drivers.
      Once enabled, a moving cursor and triple clicks are easily recognized in order to navigate windows, which can actually be used to move around the web page in a comfortable way, as most of the time the tags are limited in terms of space. This tool combines the best aspects of high-speed search engines (in other words, it indexes, caches and saves the results of all the searches it performs), and the ease of using traditional Internet search engines.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/ATIVADOR WINDOWS 10 KMS 2019 Serial Key.md b/spaces/inplisQlawa/anything-midjourney-v4-1/ATIVADOR WINDOWS 10 KMS 2019 Serial Key.md deleted file mode 100644 index e01c6abb373242a6fa9d6b1f2b0ce8c083478764..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/ATIVADOR WINDOWS 10 KMS 2019 Serial Key.md +++ /dev/null @@ -1,9 +0,0 @@ -

ATIVADOR WINDOWS 10 KMS 2019 Serial Key

DOWNLOAD »»» https://urlin.us/2uExih
-In today's post, I'll show you how to activate Windows on your VPS or dedicated server for free using KMS client product keys. Although a KMS license already allows you to activate Windows on your VPS or dedicated server, there are also many free ways to do it using programs and online services. -Luckily, we have a free product key that we can convert into KMS client product keys using our software. -In this article, I will share detailed information about activating Windows on your VPS or dedicated server. -Well, here's what you need to know: 8a78ff9644
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Activar Autocad 2013 Sin Keygen Crack [PATCHED].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Activar Autocad 2013 Sin Keygen Crack [PATCHED].md deleted file mode 100644 index 50c389fe9cbf3273c4deed6bd7fd65bb68a9c641..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Activar Autocad 2013 Sin Keygen Crack [PATCHED].md +++ /dev/null @@ -1,105 +0,0 @@ - -

Activating Autocad 2013 Without a Keygen Crack: Everything You Need to Know

Autocad 2013 is 2D and 3D design and drafting software that lets you create architecture, engineering, mechanical, electrical and other projects. It is one of the most popular and widely used CAD programs in the world, among professionals and hobbyists alike. However, to use Autocad 2013 you need to activate it with a valid product key and serial number, which Autodesk normally provides when you buy the software. But what happens if you don't have a product key or a serial number? What if you lost them or they were stolen? What if you want to use Autocad 2013 on more than one computer? In that case, you might be tempted to use a keygen crack, a program that generates fake product keys and serial numbers for Autocad 2013. That is not a good idea, because using a keygen crack can have serious consequences for your computer and your data.

In this article we will tell you how to activate Autocad 2013 without a keygen crack using the X-Force KeyGenerator, a legitimate tool that generates genuine product keys and serial numbers for Autocad 2013. The X-Force KeyGenerator is created by X-Force Team, a group of hackers who support Autodesk products and provide activation solutions for them. The X-Force KeyGenerator is easy to use and works for both the 32-bit and 64-bit versions of Autocad 2013. You can download it from several sources on the internet, such as Davi24, SoundCloud, CivilMDC, or Google Drive. However, you should always scan downloaded files with your antivirus software before using them, since some of them may contain false positives or risks.

Activar Autocad 2013 Sin Keygen Crack

DOWNLOAD ✯✯✯ https://urlin.us/2uEwpo

What is the X-Force KeyGenerator?

The X-Force KeyGenerator is a program that generates genuine product keys and serial numbers for Autocad 2013 and other Autodesk products. These keys and numbers are needed to activate the software and use it without restrictions or limitations. The X-Force KeyGenerator is created by X-Force Team, a group of hackers who support Autodesk products and provide activation solutions for them. The X-Force KeyGenerator is not a keygen crack, but a legitimate tool that respects Autodesk's terms of service and license agreement.

The X-Force KeyGenerator works for both the 32-bit and 64-bit versions of Autocad 2013. It also works for other Autodesk products, such as Revit, Maya, Inventor, 3ds Max and more. It is easy to download and use, and it only takes a few steps to activate Autocad 2013 without a keygen crack.

How do you download the X-Force KeyGenerator?

To download the X-Force KeyGenerator to activate Autocad 2013 without a keygen crack, just follow these steps:

1. Choose a reliable and safe download source, such as the ones mentioned above.
2. Click the direct download link for the X-Force KeyGenerator for the 32-bit or 64-bit version of Autocad 2013, depending on which one you have installed on your computer.
3. Wait for the download of the X-Force KeyGenerator .rar or .zip file to finish.
4. Unpack the .rar or .zip file with a program such as WinRAR or 7-Zip.
5. You will get a folder with the X-Force KeyGenerator executable and a text file with usage instructions.

How do you use the X-Force KeyGenerator?

To use the X-Force KeyGenerator to activate Autocad 2013 without a keygen crack, just follow these steps:

1. Install Autocad 2013 on your computer if you haven't done so already.
2. Run the X-Force KeyGenerator file as administrator.
3. Select "Autocad 2013" from the product list.
4. Click "Generate" to obtain a product key and a serial number for Autocad 2013.
5. Run Autocad 2013 and follow the instructions to activate it.
6. Copy the code that appears in the "Request code" field in Autocad 2013 and paste it into the "Request" field of the X-Force KeyGenerator.
7. Click "Patch" and then "OK" in the X-Force KeyGenerator.
8. Copy the code that appears in the "Activation" field of the X-Force KeyGenerator and paste it into the "Enter your activation code here" field in Autocad 2013.
9. Click "Next" and then "Finish" in Autocad 2013.
10. Done! You have now activated Autocad 2013 without a keygen crack using the X-Force KeyGenerator.

What are the benefits of using the X-Force KeyGenerator?

Using the X-Force KeyGenerator to activate Autocad 2013 without a keygen crack has several benefits:

• It is legal and ethical. You are not violating Autodesk's terms of service or license agreement, nor are you depriving Autodesk of its legitimate revenue.
• It is safe and reliable. You are not downloading or running malicious programs that could infect your computer or steal your data, nor are you interfering with the correct operation of the software.
• It is quick and easy. You don't have to search for and enter fake keys or numbers that may not work or may be detected, and you don't have to wait or pay for online or phone activation.
• It is effective and permanent. You don't have to repeat the process every time you update or reinstall the software, and you don't have to worry about possible blocks or deactivations.

What should you keep in mind when using the X-Force KeyGenerator?

Although using the X-Force KeyGenerator to activate Autocad 2013 without a keygen crack is a viable and safe option, there are some things to keep in mind when using it:

• Don't use the X-Force KeyGenerator to activate Autodesk products other than Autocad 2013. Each product has its own specific X-Force KeyGenerator, which you can find on the internet.
• Don't share or distribute the X-Force KeyGenerator or the keys and numbers you generate with it. This can be considered piracy and can get you into legal trouble.
• Don't use the X-Force KeyGenerator on public or shared computers. This can compromise your security and that of your data.
• Don't use the X-Force KeyGenerator if you have a valid product key or serial number for Autocad 2013. This can cause conflicts or incompatibilities with the software.

Conclusion

Activating Autocad 2013 without a keygen crack is possible with the X-Force KeyGenerator, a legitimate tool that generates genuine product keys and serial numbers for this software. The X-Force KeyGenerator is created by X-Force Team, a group of hackers who support Autodesk products. It is easy to download and use, and it works for both the 32-bit and 64-bit versions of Autocad 2013.

We hope this article has been useful and interesting. If you want to know more about activating Autocad 2013 without a keygen crack or about other Autodesk products, you can visit our blog Blomiky, where you will find all the information you need. You can also leave us your comments or questions at the end of this article. We will be happy to answer you.

If you liked this article and want to learn more about Autocad 2013 and other Autodesk products, we invite you to subscribe to our newsletter. That way you will receive by email the latest news, tips, tutorials and exclusive offers from Blomiky. Just enter your name and email address in the form below and click the subscribe button. It is free and you can cancel your subscription at any time.

Don't wait any longer and join our community of users of Autocad 2013 and other Autodesk products. We assure you that you won't regret it.

If you liked this article and want to learn more about Autocad 2013 and other Autodesk products, we invite you to follow us on our social networks. That way you can keep up with the latest news, tips, tutorials and exclusive offers from Blomiky, interact with other users of Autocad 2013 and other Autodesk products, and share your experiences, questions and suggestions. Just click the icons below and follow us on Facebook, Twitter, Instagram or YouTube. It is free and you can stop following us at any time.

Don't wait any longer and join our community of users of Autocad 2013 and other Autodesk products. We look forward to welcoming you with open arms.

Facebook - Twitter - Instagram - YouTube

In summary, activating Autocad 2013 without a keygen crack is possible with the X-Force KeyGenerator, a legitimate tool that generates genuine product keys and serial numbers for this software. The X-Force KeyGenerator is created by X-Force Team, a group of hackers who support Autodesk products. It is easy to download and use, and it works for both the 32-bit and 64-bit versions of Autocad 2013.

Using the X-Force KeyGenerator has several benefits: it is legal, ethical, safe, reliable, quick, easy, effective and permanent. However, there are also some things to keep in mind when using it: don't use it for other Autodesk products, don't share or distribute it, don't use it on public or shared computers, and don't use it if you have a valid key or number.

We hope this article has been useful and interesting. If you want to know more about activating Autocad 2013 without a keygen crack or about other Autodesk products, you can visit our blog Blomiky, subscribe to our newsletter, or follow us on our social networks to keep up with the latest news, tips, tutorials and exclusive offers. You can also leave us your comments or questions at the end of this article. We will be happy to answer you.

Thank you for reading, and see you next time.

3cee63e6c2
        -
        -
        \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Amada Ap100 Software ((NEW)) Crack 28.md b/spaces/inreVtussa/clothingai/Examples/Amada Ap100 Software ((NEW)) Crack 28.md deleted file mode 100644 index ddfd7f1d521c0f0abca12f305882e72694335acb..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Amada Ap100 Software ((NEW)) Crack 28.md +++ /dev/null @@ -1,6 +0,0 @@ -

amada ap100 software crack 28

DOWNLOAD ⇒ https://tiurll.com/2uCkkU

- -Amada Ap100 Software Crack 28 http://ssurll.com/10e5j7 aa94214199 February 24, 2018 ... Amada Ap100 Software Crack 28. Download. Amada ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/irfank/katanaml-donut-demo-3/app.py b/spaces/irfank/katanaml-donut-demo-3/app.py deleted file mode 100644 index 592e464d62c966a60f048b12621ccd06749efd65..0000000000000000000000000000000000000000 --- a/spaces/irfank/katanaml-donut-demo-3/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/katanaml/donut-demo").launch() \ No newline at end of file diff --git a/spaces/ivuxy/Eval/README.md b/spaces/ivuxy/Eval/README.md deleted file mode 100644 index deddd5d51e519164716f304240476a0395742600..0000000000000000000000000000000000000000 --- a/spaces/ivuxy/Eval/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Eval -emoji: 💻 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/jaleesahmed/correlation-and-visualization/README.md b/spaces/jaleesahmed/correlation-and-visualization/README.md deleted file mode 100644 index bef1a5ef2c706c27f817e25a27a1c9cd64224fa1..0000000000000000000000000000000000000000 --- a/spaces/jaleesahmed/correlation-and-visualization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Correlation And Visualization -emoji: 📉 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -license: lgpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jason9693/KoreanHateSpeechClassifier/attention.py b/spaces/jason9693/KoreanHateSpeechClassifier/attention.py deleted file mode 100644 index 502a74d5e398fafab7f06183cf9837e480eb7891..0000000000000000000000000000000000000000 --- a/spaces/jason9693/KoreanHateSpeechClassifier/attention.py +++ /dev/null @@ -1,97 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoConfig -import gradio as gr -from torch.nn import functional as F -import seaborn - -import matplotlib -import platform - -if platform.system() == "Darwin": - print("MacOS") - matplotlib.use('Agg') -import matplotlib.pyplot as plt -import io -from PIL import Image - -import matplotlib.font_manager as fm - - - - -import util - -font_path = r'NanumGothicCoding.ttf' -fontprop = fm.FontProperties(fname=font_path, size=18) - -plt.rcParams["font.family"] = 'NanumGothic' - - -def visualize_attention(sent, attention_matrix, n_words=10): - def draw(data, x, y, ax): - seaborn.heatmap(data, - xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, - cbar=False, ax=ax) - - # make plt figure with 1x6 subplots - fig = plt.figure(figsize=(16, 8)) - # fig.subplots_adjust(hspace=0.7, wspace=0.2) - for i, layer in enumerate(range(1, 12, 2)): - ax = fig.add_subplot(2, 3, i+1) - ax.set_title("Layer {}".format(layer)) - draw(attention_matrix[layer], sent if layer > 6 else [], sent if layer in [1,7] else [], ax=ax) - - fig.tight_layout() - plt.close() - - return fig - - - -def predict(model_name, text): - - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained(model_name) - config = AutoConfig.from_pretrained(model_name) - print(config.id2label) - - tokenized_text = tokenizer([text], return_tensors='pt') - - input_tokens = tokenizer.convert_ids_to_tokens(tokenized_text.input_ids[0]) - print(input_tokens) - input_tokens = util.bytetokens_to_unicdode(input_tokens) if config.model_type in ['roberta', 'gpt', 'gpt2'] else 
input_tokens - - model.eval() - output, attention = model(**tokenized_text, output_attentions=True, return_dict=False) - output = F.softmax(output, dim=-1) - result = {} - - for idx, label in enumerate(output[0].detach().numpy()): - result[config.id2label[idx]] = float(label) - - fig = visualize_attention(input_tokens, attention[0][0].detach().numpy()) - return result, fig#.logits.detach()#.numpy()#, output.attentions.detach().numpy() - - -if __name__ == '__main__': - - model_name = 'jason9693/SoongsilBERT-beep-base' - text = '읿딴걸 홍볿글 읿랉곭 쌑젩낄고 앉앟있냩' - # output = predict(model_name, text) - - # print(output) - - model_name_list = [ - 'jason9693/SoongsilBERT-beep-base' - ] - - #Create a gradio app with a button that calls predict() - app = gr.Interface( - fn=predict, - server_port=26899, - server_name='0.0.0.0', - inputs=[gr.inputs.Dropdown(model_name_list, label="Model Name"), 'text'], outputs=['label', 'plot'], - examples = [[model_name, text]], - title="한국어 혐오성 발화 분류기 (Korean Hate Speech Classifier)", - description="Korean Hate Speech Classifier with Several Pretrained LM\nCurrent Supported Model:\n1. SoongsilBERT" - ) - app.launch(inline=False) diff --git a/spaces/jbilcke-hf/AnimateDiff/download_bashscripts/8-GhibliBackground.sh b/spaces/jbilcke-hf/AnimateDiff/download_bashscripts/8-GhibliBackground.sh deleted file mode 100644 index 39b9e76ddf77a842e4f41acbee9e73f62c49eec0..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/AnimateDiff/download_bashscripts/8-GhibliBackground.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -wget https://civitai.com/api/download/models/102828 -P models/DreamBooth_LoRA/ --content-disposition --no-check-certificate -wget https://civitai.com/api/download/models/57618 -P models/DreamBooth_LoRA/ --content-disposition --no-check-certificate diff --git a/spaces/jeycov/IADERM-UTOPIC-PFIZER/app.py b/spaces/jeycov/IADERM-UTOPIC-PFIZER/app.py deleted file mode 100644 index c0611b461a8a5d09bc2c0937bf3328023b69dd8f..0000000000000000000000000000000000000000 --- a/spaces/jeycov/IADERM-UTOPIC-PFIZER/app.py +++ /dev/null @@ -1,89 +0,0 @@ -import gradio as gr -import tensorflow as tf - -path_to_model = "./modelo_jeysshon_iaderm.h5" - -model = tf.keras.models.load_model(path_to_model) - -labels = [ - 'Acné / Rosácea', 'Queratosis Actínica / Carcinoma Basocelular', - 'Dermatitis Atópica', 'Enfermedad Bullosa', - 'Celulitis Impétigo (Infecciones Bacterianas)', - 'Eczema', 'Exanthems (Erupciones Cutáneas por Medicamentos)', 'Pérdida de Cabello (Alopecia)', - 'Herpes HPV', 'Trastornos de Pigmentación', - 'Lupus', - 'Melanoma (Cáncer de Piel)', 'Hongos en las Uñas', - 'Hiedra Venenosa', - 'Psoriasis (Lichen Planus)', 'Sarna Lyme', - 'Queratosis Seborreica', 'Enfermedad Sistémica', - 'Tinea Ringworm (Infecciones Fúngicas)', - 'Urticaria Ronchas', 'Tumores Vasculares', 'Vasculitis', 'Verrugas Molusco' -] - -def classify_image(photos): - photos = photos.reshape((-1, 224, 224, 3)) - prediction = model.predict(photos).flatten() - confidences = {labels[i]: float(prediction[i]) for i in range(23)} - return confidences - -title = "AI-DERM DETECTION " - -article = ( - "Se propone un sistema automatizado para el diagnóstico de las 23 enfermedades comunes de la piel:\n\n" - "1. Acné / Rosácea\n" - "2. Queratosis Actínica / Carcinoma Basocelular\n" - "3. Dermatitis Atópica\n" - "4. Enfermedades Bullosas\n" - "5. Celulitis / Impétigo (Infecciones Bacterianas)\n" - "6. Eccema\n" - "7. Exantemas (Erupciones Cutáneas por Medicamentos)\n" - "8. (areata)\n" - "9. Herpes / VPH\n" - "10. 
Trastornos de la Pigmentación\n" - "11. Lupus\n" - "12. Melanoma (Cáncer de Piel)\n" - "13. Hongos en las Uñas\n" - "14. Hiedra Venenosa\n" - "15. Psoriasis (liquen plano)\n" - "16. Sarna / Enfermedad de Lyme\n" - "17. Queratosis Seborreica\n" - "18. Enfermedad Sistémica\n" - "19. Tiña / Tiña (Infecciones Fúngicas)\n" - "20. Urticaria / Ronchas\n" - "21. Tumores Vasculares\n" - "22. Vasculitis\n" - "23. Verrugas / Molusco\n\n" - "Este sistema automatizado se basa en un modelo preentrenado EfficientNetB7, capaz de diagnosticar 23 enfermedades cutáneas comunes. La interfaz te permite cargar una imagen y obtener las probabilidades de cada enfermedad detectada." - "

        " - "AI-DERM . Jeysshon Bustos . 2023." - "

        " -) - -description= ( - - "Utilizamos la interfaz de usuario generada por Gradio para ingresar imágenes a nuestra red neuronal convolucional, la cual ha sido entrenada con el propósito de realizar clasificaciones de imágenes. Esta red neuronal demostró su capacidad al lograr una precisa categorización de la imagen proporcionada. En ocasiones, resulta beneficioso ajustar el tamaño de la imagen mediante la interfaz de Gradio para potenciar aún más su rendimiento." -) - -examples = [ - ['./123.jpg'], - ['./acne-closed-comedo-2.jpg'], - ['./distal-subungual-onychomycosis-86.jpeg'], - ['./cherry-angioma-16.jpg'], - ['./malignant-melanoma-16.jpg'], - ['./tinea-primary-lesion-15.jpeg'], - ['./congenital-nevus-35.jpg'], - ['./tinea-body-137.jpg'], - ['./atopic-13.jpg'], - ['./atopic-7.jpg'] -] - -gr.Interface( - fn=classify_image, - title=title, - article=article, - description=description, - inputs=gr.inputs.Image(shape=(224, 224)), - outputs=gr.outputs.Label(num_top_classes=4), - examples=examples -).launch() - diff --git a/spaces/jharrison27/VR-DEMO/README.md b/spaces/jharrison27/VR-DEMO/README.md deleted file mode 100644 index a66d2e5b4e82f0bd5dea724bc2e52bf6d50b52a1..0000000000000000000000000000000000000000 --- a/spaces/jharrison27/VR-DEMO/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: VR DEMO -emoji: 🚀 -colorFrom: purple -colorTo: indigo -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jhwen/bingo/src/components/markdown.tsx b/spaces/jhwen/bingo/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/jinhybr/OCR-Invoice-LayoutLMv3/app.py b/spaces/jinhybr/OCR-Invoice-LayoutLMv3/app.py deleted file mode 100644 index 5615916416d367c54bff82ae6880ed26f107ad97..0000000000000000000000000000000000000000 --- a/spaces/jinhybr/OCR-Invoice-LayoutLMv3/app.py +++ /dev/null @@ -1,146 +0,0 @@ -import os - -os.system('pip install pip --upgrade') -os.system('pip install -q git+https://github.com/huggingface/transformers.git') - - -os.system("pip install pyyaml==5.1") -# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158) -os.system( - "pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html" -) - -# install detectron2 that matches pytorch 1.8 -# See https://detectron2.readthedocs.io/tutorials/install.html for instructions -os.system( - "pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html" -) - -## install PyTesseract -os.system("pip install -q pytesseract") - -import gradio as gr -import numpy as np -from transformers import AutoModelForTokenClassification -from datasets.features import ClassLabel -from transformers import AutoProcessor -from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D -import torch -from datasets import load_metric -from transformers import 
LayoutLMv3ForTokenClassification -from transformers.data.data_collator import default_data_collator - - -from transformers import AutoModelForTokenClassification -from datasets import load_dataset -from PIL import Image, ImageDraw, ImageFont - - -processor = AutoProcessor.from_pretrained("jinhybr/OCR-LayoutLMv3-Invoice", apply_ocr=True) -model = AutoModelForTokenClassification.from_pretrained("jinhybr/OCR-LayoutLMv3-Invoice") - - - -# load image example -dataset = load_dataset("jinhybr/WildReceipt", split="test") -Image.open(dataset[1]["image_path"]).convert("RGB").save("example1.png") -Image.open(dataset[3]["image_path"]).convert("RGB").save("example2.png") -Image.open(dataset[25]["image_path"]).convert("RGB").save("example3.png") -# define id2label, label2color -labels = dataset.features['ner_tags'].feature.names -id2label = {v: k for v, k in enumerate(labels)} -label2color = { - "Date_key": 'red', - "Date_value": 'green', - "Ignore": 'orange', - "Others": 'orange', - "Prod_item_key": 'red', - "Prod_item_value": 'green', - "Prod_price_key": 'red', - "Prod_price_value": 'green', - "Prod_quantity_key": 'red', - "Prod_quantity_value": 'green', - "Store_addr_key": 'red', - "Store_addr_value": 'green', - "Store_name_key": 'red', - "Store_name_value": 'green', - "Subtotal_key": 'red', - "Subtotal_value": 'green', - "Tax_key": 'red', - "Tax_value": 'green', - "Tel_key": 'red', - "Tel_value": 'green', - "Time_key": 'red', - "Time_value": 'green', - "Tips_key": 'red', - "Tips_value": 'green', - "Total_key": 'red', - "Total_value": 'blue' - } - -def unnormalize_box(bbox, width, height): - return [ - width * (bbox[0] / 1000), - height * (bbox[1] / 1000), - width * (bbox[2] / 1000), - height * (bbox[3] / 1000), - ] - - -def iob_to_label(label): - return label - - - -def process_image(image): - - print(type(image)) - width, height = image.size - - # encode - encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt") - offset_mapping = encoding.pop('offset_mapping') - - # forward pass - outputs = model(**encoding) - - # get predictions - predictions = outputs.logits.argmax(-1).squeeze().tolist() - token_boxes = encoding.bbox.squeeze().tolist() - - # only keep non-subword predictions - is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0 - true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]] - true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]] - - # draw predictions over the image - draw = ImageDraw.Draw(image) - font = ImageFont.load_default() - for prediction, box in zip(true_predictions, true_boxes): - predicted_label = iob_to_label(prediction) - draw.rectangle(box, outline=label2color[predicted_label]) - draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font) - - return image - - -title = "OCR Invoice - Information Extraction - LayoutLMv3" -description = "Fine-tuned Microsoft's LayoutLMv3 on WildReceipt Dataset to parse Invoice OCR document. To use it, simply upload an image or use the example image below. Results will show up in a few seconds." - -article="References
        [1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. Paper Link
        [2] LayoutLMv3 training and inference
        [3] Hongbin Sun, Zhanghui Kuang, Xiaoyu Yue, Chenhao Lin, and Wayne Zhang. 2021. Spatial Dual-Modality Graph Reasoning for Key Information Extraction. arXiv. DOI:https://doi.org/10.48550/ARXIV.2103.14470 Paper Link" - -examples =[['example1.png'],['example2.png'],['example3.png'],['inv2.jpg']] - -css = """.output_image, .input_image {height: 600px !important}""" - -iface = gr.Interface(fn=process_image, - inputs=gr.inputs.Image(type="pil"), - outputs=gr.outputs.Image(type="pil", label="annotated image"), - title=title, - description=description, - article=article, - examples=examples, - css=css, - analytics_enabled = True, enable_queue=True) - -iface.launch(inline=False, share=False, debug=True) \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageQt.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageQt.py deleted file mode 100644 index 9b7245454dfcccb4e822a6634168d405c0e791bb..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageQt.py +++ /dev/null @@ -1,216 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# a simple Qt image interface. -# -# history: -# 2006-06-03 fl: created -# 2006-06-04 fl: inherit from QImage instead of wrapping it -# 2006-06-05 fl: removed toimage helper; move string support to ImageQt -# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) -# -# Copyright (c) 2006 by Secret Labs AB -# Copyright (c) 2006 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import sys -from io import BytesIO - -from . import Image -from ._util import is_path - -qt_versions = [ - ["6", "PyQt6"], - ["side6", "PySide6"], -] - -# If a version has already been imported, attempt it first -qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) -for qt_version, qt_module in qt_versions: - try: - if qt_module == "PyQt6": - from PyQt6.QtCore import QBuffer, QIODevice - from PyQt6.QtGui import QImage, QPixmap, qRgba - elif qt_module == "PySide6": - from PySide6.QtCore import QBuffer, QIODevice - from PySide6.QtGui import QImage, QPixmap, qRgba - except (ImportError, RuntimeError): - continue - qt_is_installed = True - break -else: - qt_is_installed = False - qt_version = None - - -def rgb(r, g, b, a=255): - """(Internal) Turns an RGB color into a Qt compatible color integer.""" - # use qRgb to pack the colors, and then turn the resulting long - # into a negative integer with the same bitpattern. - return qRgba(r, g, b, a) & 0xFFFFFFFF - - -def fromqimage(im): - """ - :param im: QImage or PIL ImageQt object - """ - buffer = QBuffer() - if qt_version == "6": - try: - qt_openmode = QIODevice.OpenModeFlag - except AttributeError: - qt_openmode = QIODevice.OpenMode - else: - qt_openmode = QIODevice - buffer.open(qt_openmode.ReadWrite) - # preserve alpha channel with png - # otherwise ppm is more friendly with Image.open - if im.hasAlphaChannel(): - im.save(buffer, "png") - else: - im.save(buffer, "ppm") - - b = BytesIO() - b.write(buffer.data()) - buffer.close() - b.seek(0) - - return Image.open(b) - - -def fromqpixmap(im): - return fromqimage(im) - # buffer = QBuffer() - # buffer.open(QIODevice.ReadWrite) - # # im.save(buffer) - # # What if png doesn't support some image features like animation? 
- # im.save(buffer, 'ppm') - # bytes_io = BytesIO() - # bytes_io.write(buffer.data()) - # buffer.close() - # bytes_io.seek(0) - # return Image.open(bytes_io) - - -def align8to32(bytes, width, mode): - """ - converts each scanline of data from 8 bit to 32 bit aligned - """ - - bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode] - - # calculate bytes per line and the extra padding if needed - bits_per_line = bits_per_pixel * width - full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) - bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) - - extra_padding = -bytes_per_line % 4 - - # already 32 bit aligned by luck - if not extra_padding: - return bytes - - new_data = [] - for i in range(len(bytes) // bytes_per_line): - new_data.append( - bytes[i * bytes_per_line : (i + 1) * bytes_per_line] - + b"\x00" * extra_padding - ) - - return b"".join(new_data) - - -def _toqclass_helper(im): - data = None - colortable = None - exclusive_fp = False - - # handle filename, if given instead of image name - if hasattr(im, "toUtf8"): - # FIXME - is this really the best way to do this? - im = str(im.toUtf8(), "utf-8") - if is_path(im): - im = Image.open(im) - exclusive_fp = True - - qt_format = QImage.Format if qt_version == "6" else QImage - if im.mode == "1": - format = qt_format.Format_Mono - elif im.mode == "L": - format = qt_format.Format_Indexed8 - colortable = [] - for i in range(256): - colortable.append(rgb(i, i, i)) - elif im.mode == "P": - format = qt_format.Format_Indexed8 - colortable = [] - palette = im.getpalette() - for i in range(0, len(palette), 3): - colortable.append(rgb(*palette[i : i + 3])) - elif im.mode == "RGB": - # Populate the 4th channel with 255 - im = im.convert("RGBA") - - data = im.tobytes("raw", "BGRA") - format = qt_format.Format_RGB32 - elif im.mode == "RGBA": - data = im.tobytes("raw", "BGRA") - format = qt_format.Format_ARGB32 - elif im.mode == "I;16" and hasattr(qt_format, "Format_Grayscale16"): # Qt 5.13+ - im = im.point(lambda i: i * 256) - - format = qt_format.Format_Grayscale16 - else: - if exclusive_fp: - im.close() - msg = f"unsupported image mode {repr(im.mode)}" - raise ValueError(msg) - - size = im.size - __data = data or align8to32(im.tobytes(), size[0], im.mode) - if exclusive_fp: - im.close() - return {"data": __data, "size": size, "format": format, "colortable": colortable} - - -if qt_is_installed: - - class ImageQt(QImage): - def __init__(self, im): - """ - An PIL image wrapper for Qt. This is a subclass of PyQt's QImage - class. - - :param im: A PIL Image object, or a file name (given either as - Python string or a PyQt string object). - """ - im_data = _toqclass_helper(im) - # must keep a reference, or Qt will crash! - # All QImage constructors that take data operate on an existing - # buffer, so this buffer has to hang on for the life of the image. - # Fixes https://github.com/python-pillow/Pillow/issues/1370 - self.__data = im_data["data"] - super().__init__( - self.__data, - im_data["size"][0], - im_data["size"][1], - im_data["format"], - ) - if im_data["colortable"]: - self.setColorTable(im_data["colortable"]) - - -def toqimage(im): - return ImageQt(im) - - -def toqpixmap(im): - # # This doesn't work. For now using a dumb approach. 
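# (Aside: a quick sanity check of align8to32's padding arithmetic, using the
# helper defined above. A 10-pixel-wide mode "1" image packs each scanline into
# 2 bytes, so 2 zero bytes pad it out to the 4-byte boundary Qt expects.)
row = b"\xff\x80"                                    # one packed 2-byte scanline
assert align8to32(row, 10, "1") == b"\xff\x80\x00\x00"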
- # im_data = _toqclass_helper(im) - # result = QPixmap(im_data["size"][0], im_data["size"][1]) - # result.loadFromData(im_data["data"]) - qimage = toqimage(im) - return QPixmap.fromImage(qimage) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_exceptions.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_exceptions.py deleted file mode 100644 index ae706a1806299a1f13f3a905b4582c52bda5450c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_exceptions.py +++ /dev/null @@ -1,441 +0,0 @@ -import warnings -from typing import Any, Dict, Iterable, List, Optional, Set # noqa - -from yarl import URL - -from .typedefs import LooseHeaders, StrOrURL -from .web_response import Response - -__all__ = ( - "HTTPException", - "HTTPError", - "HTTPRedirection", - "HTTPSuccessful", - "HTTPOk", - "HTTPCreated", - "HTTPAccepted", - "HTTPNonAuthoritativeInformation", - "HTTPNoContent", - "HTTPResetContent", - "HTTPPartialContent", - "HTTPMultipleChoices", - "HTTPMovedPermanently", - "HTTPFound", - "HTTPSeeOther", - "HTTPNotModified", - "HTTPUseProxy", - "HTTPTemporaryRedirect", - "HTTPPermanentRedirect", - "HTTPClientError", - "HTTPBadRequest", - "HTTPUnauthorized", - "HTTPPaymentRequired", - "HTTPForbidden", - "HTTPNotFound", - "HTTPMethodNotAllowed", - "HTTPNotAcceptable", - "HTTPProxyAuthenticationRequired", - "HTTPRequestTimeout", - "HTTPConflict", - "HTTPGone", - "HTTPLengthRequired", - "HTTPPreconditionFailed", - "HTTPRequestEntityTooLarge", - "HTTPRequestURITooLong", - "HTTPUnsupportedMediaType", - "HTTPRequestRangeNotSatisfiable", - "HTTPExpectationFailed", - "HTTPMisdirectedRequest", - "HTTPUnprocessableEntity", - "HTTPFailedDependency", - "HTTPUpgradeRequired", - "HTTPPreconditionRequired", - "HTTPTooManyRequests", - "HTTPRequestHeaderFieldsTooLarge", - "HTTPUnavailableForLegalReasons", - "HTTPServerError", - "HTTPInternalServerError", - "HTTPNotImplemented", - "HTTPBadGateway", - "HTTPServiceUnavailable", - "HTTPGatewayTimeout", - "HTTPVersionNotSupported", - "HTTPVariantAlsoNegotiates", - "HTTPInsufficientStorage", - "HTTPNotExtended", - "HTTPNetworkAuthenticationRequired", -) - - -############################################################ -# HTTP Exceptions -############################################################ - - -class HTTPException(Response, Exception): - - # You should set in subclasses: - # status = 200 - - status_code = -1 - empty_body = False - - __http_exception__ = True - - def __init__( - self, - *, - headers: Optional[LooseHeaders] = None, - reason: Optional[str] = None, - body: Any = None, - text: Optional[str] = None, - content_type: Optional[str] = None, - ) -> None: - if body is not None: - warnings.warn( - "body argument is deprecated for http web exceptions", - DeprecationWarning, - ) - Response.__init__( - self, - status=self.status_code, - headers=headers, - reason=reason, - body=body, - text=text, - content_type=content_type, - ) - Exception.__init__(self, self.reason) - if self.body is None and not self.empty_body: - self.text = f"{self.status}: {self.reason}" - - def __bool__(self) -> bool: - return True - - -class HTTPError(HTTPException): - """Base class for exceptions with status codes in the 400s and 500s.""" - - -class HTTPRedirection(HTTPException): - """Base class for exceptions with status codes in the 300s.""" - - -class HTTPSuccessful(HTTPException): - """Base class for exceptions with status codes in the 
200s.""" - - -class HTTPOk(HTTPSuccessful): - status_code = 200 - - -class HTTPCreated(HTTPSuccessful): - status_code = 201 - - -class HTTPAccepted(HTTPSuccessful): - status_code = 202 - - -class HTTPNonAuthoritativeInformation(HTTPSuccessful): - status_code = 203 - - -class HTTPNoContent(HTTPSuccessful): - status_code = 204 - empty_body = True - - -class HTTPResetContent(HTTPSuccessful): - status_code = 205 - empty_body = True - - -class HTTPPartialContent(HTTPSuccessful): - status_code = 206 - - -############################################################ -# 3xx redirection -############################################################ - - -class _HTTPMove(HTTPRedirection): - def __init__( - self, - location: StrOrURL, - *, - headers: Optional[LooseHeaders] = None, - reason: Optional[str] = None, - body: Any = None, - text: Optional[str] = None, - content_type: Optional[str] = None, - ) -> None: - if not location: - raise ValueError("HTTP redirects need a location to redirect to.") - super().__init__( - headers=headers, - reason=reason, - body=body, - text=text, - content_type=content_type, - ) - self.headers["Location"] = str(URL(location)) - self.location = location - - -class HTTPMultipleChoices(_HTTPMove): - status_code = 300 - - -class HTTPMovedPermanently(_HTTPMove): - status_code = 301 - - -class HTTPFound(_HTTPMove): - status_code = 302 - - -# This one is safe after a POST (the redirected location will be -# retrieved with GET): -class HTTPSeeOther(_HTTPMove): - status_code = 303 - - -class HTTPNotModified(HTTPRedirection): - # FIXME: this should include a date or etag header - status_code = 304 - empty_body = True - - -class HTTPUseProxy(_HTTPMove): - # Not a move, but looks a little like one - status_code = 305 - - -class HTTPTemporaryRedirect(_HTTPMove): - status_code = 307 - - -class HTTPPermanentRedirect(_HTTPMove): - status_code = 308 - - -############################################################ -# 4xx client error -############################################################ - - -class HTTPClientError(HTTPError): - pass - - -class HTTPBadRequest(HTTPClientError): - status_code = 400 - - -class HTTPUnauthorized(HTTPClientError): - status_code = 401 - - -class HTTPPaymentRequired(HTTPClientError): - status_code = 402 - - -class HTTPForbidden(HTTPClientError): - status_code = 403 - - -class HTTPNotFound(HTTPClientError): - status_code = 404 - - -class HTTPMethodNotAllowed(HTTPClientError): - status_code = 405 - - def __init__( - self, - method: str, - allowed_methods: Iterable[str], - *, - headers: Optional[LooseHeaders] = None, - reason: Optional[str] = None, - body: Any = None, - text: Optional[str] = None, - content_type: Optional[str] = None, - ) -> None: - allow = ",".join(sorted(allowed_methods)) - super().__init__( - headers=headers, - reason=reason, - body=body, - text=text, - content_type=content_type, - ) - self.headers["Allow"] = allow - self.allowed_methods: Set[str] = set(allowed_methods) - self.method = method.upper() - - -class HTTPNotAcceptable(HTTPClientError): - status_code = 406 - - -class HTTPProxyAuthenticationRequired(HTTPClientError): - status_code = 407 - - -class HTTPRequestTimeout(HTTPClientError): - status_code = 408 - - -class HTTPConflict(HTTPClientError): - status_code = 409 - - -class HTTPGone(HTTPClientError): - status_code = 410 - - -class HTTPLengthRequired(HTTPClientError): - status_code = 411 - - -class HTTPPreconditionFailed(HTTPClientError): - status_code = 412 - - -class HTTPRequestEntityTooLarge(HTTPClientError): - status_code = 413 
- - def __init__(self, max_size: float, actual_size: float, **kwargs: Any) -> None: - kwargs.setdefault( - "text", - "Maximum request body size {} exceeded, " - "actual body size {}".format(max_size, actual_size), - ) - super().__init__(**kwargs) - - -class HTTPRequestURITooLong(HTTPClientError): - status_code = 414 - - -class HTTPUnsupportedMediaType(HTTPClientError): - status_code = 415 - - -class HTTPRequestRangeNotSatisfiable(HTTPClientError): - status_code = 416 - - -class HTTPExpectationFailed(HTTPClientError): - status_code = 417 - - -class HTTPMisdirectedRequest(HTTPClientError): - status_code = 421 - - -class HTTPUnprocessableEntity(HTTPClientError): - status_code = 422 - - -class HTTPFailedDependency(HTTPClientError): - status_code = 424 - - -class HTTPUpgradeRequired(HTTPClientError): - status_code = 426 - - -class HTTPPreconditionRequired(HTTPClientError): - status_code = 428 - - -class HTTPTooManyRequests(HTTPClientError): - status_code = 429 - - -class HTTPRequestHeaderFieldsTooLarge(HTTPClientError): - status_code = 431 - - -class HTTPUnavailableForLegalReasons(HTTPClientError): - status_code = 451 - - def __init__( - self, - link: str, - *, - headers: Optional[LooseHeaders] = None, - reason: Optional[str] = None, - body: Any = None, - text: Optional[str] = None, - content_type: Optional[str] = None, - ) -> None: - super().__init__( - headers=headers, - reason=reason, - body=body, - text=text, - content_type=content_type, - ) - self.headers["Link"] = '<%s>; rel="blocked-by"' % link - self.link = link - - -############################################################ -# 5xx Server Error -############################################################ -# Response status codes beginning with the digit "5" indicate cases in -# which the server is aware that it has erred or is incapable of -# performing the request. Except when responding to a HEAD request, the -# server SHOULD include an entity containing an explanation of the error -# situation, and whether it is a temporary or permanent condition. User -# agents SHOULD display any included entity to the user. These response -# codes are applicable to any request method. - - -class HTTPServerError(HTTPError): - pass - - -class HTTPInternalServerError(HTTPServerError): - status_code = 500 - - -class HTTPNotImplemented(HTTPServerError): - status_code = 501 - - -class HTTPBadGateway(HTTPServerError): - status_code = 502 - - -class HTTPServiceUnavailable(HTTPServerError): - status_code = 503 - - -class HTTPGatewayTimeout(HTTPServerError): - status_code = 504 - - -class HTTPVersionNotSupported(HTTPServerError): - status_code = 505 - - -class HTTPVariantAlsoNegotiates(HTTPServerError): - status_code = 506 - - -class HTTPInsufficientStorage(HTTPServerError): - status_code = 507 - - -class HTTPNotExtended(HTTPServerError): - status_code = 510 - - -class HTTPNetworkAuthenticationRequired(HTTPServerError): - status_code = 511 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/ffmpy.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/ffmpy.py deleted file mode 100644 index 03291fca55a355b3041b8538217f334e9c4332eb..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/ffmpy.py +++ /dev/null @@ -1,203 +0,0 @@ -import errno -import shlex -import subprocess - -__version__ = "0.3.1" - - -class FFmpeg(object): - """Wrapper for various `FFmpeg `_ related applications (ffmpeg, - ffprobe). 
- """ - - def __init__( - self, executable="ffmpeg", global_options=None, inputs=None, outputs=None - ): - """Initialize FFmpeg command line wrapper. - - Compiles FFmpeg command line from passed arguments (executable path, options, inputs and - outputs). ``inputs`` and ``outputs`` are dictionares containing inputs/outputs as keys and - their respective options as values. One dictionary value (set of options) must be either a - single space separated string, or a list or strings without spaces (i.e. each part of the - option is a separate item of the list, the result of calling ``split()`` on the options - string). If the value is a list, it cannot be mixed, i.e. cannot contain items with spaces. - An exception are complex FFmpeg command lines that contain quotes: the quoted part must be - one string, even if it contains spaces (see *Examples* for more info). - For more info about FFmpeg command line format see `here - `_. - - :param str executable: path to ffmpeg executable; by default the ``ffmpeg`` command will be - searched for in the ``PATH``, but can be overridden with an absolute path to ``ffmpeg`` - executable - :param iterable global_options: global options passed to ``ffmpeg`` executable (e.g. - ``-y``, ``-v`` etc.); can be specified either as a list/tuple/set of strings, or one - space-separated string; by default no global options are passed - :param dict inputs: a dictionary specifying one or more input arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - :param dict outputs: a dictionary specifying one or more output arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - """ - self.executable = executable - self._cmd = [executable] - - global_options = global_options or [] - if _is_sequence(global_options): - normalized_global_options = [] - for opt in global_options: - normalized_global_options += shlex.split(opt) - else: - normalized_global_options = shlex.split(global_options) - - self._cmd += normalized_global_options - self._cmd += _merge_args_opts(inputs, add_input_option=True) - self._cmd += _merge_args_opts(outputs) - - self.cmd = subprocess.list2cmdline(self._cmd) - self.process = None - - def __repr__(self): - return "<{0!r} {1!r}>".format(self.__class__.__name__, self.cmd) - - def run(self, input_data=None, stdout=None, stderr=None, env=None, **kwargs): - """Execute FFmpeg command line. - - ``input_data`` can contain input for FFmpeg in case ``pipe`` protocol is used for input. - ``stdout`` and ``stderr`` specify where to redirect the ``stdout`` and ``stderr`` of the - process. By default no redirection is done, which means all output goes to running shell - (this mode should normally only be used for debugging purposes). If FFmpeg ``pipe`` protocol - is used for output, ``stdout`` must be redirected to a pipe by passing `subprocess.PIPE` as - ``stdout`` argument. You can pass custom environment to ffmpeg process with ``env``. - - Returns a 2-tuple containing ``stdout`` and ``stderr`` of the process. If there was no - redirection or if the output was redirected to e.g. `os.devnull`, the value returned will - be a tuple of two `None` values, otherwise it will contain the actual ``stdout`` and - ``stderr`` data returned by ffmpeg process. - - More info about ``pipe`` protocol `here `_. - - :param str input_data: input data for FFmpeg to deal with (audio, video etc.) as bytes (e.g. 
- the result of reading a file in binary mode) - :param stdout: redirect FFmpeg ``stdout`` there (default is `None` which means no - redirection) - :param stderr: redirect FFmpeg ``stderr`` there (default is `None` which means no - redirection) - :param env: custom environment for ffmpeg process - :param kwargs: any other keyword arguments to be forwarded to `subprocess.Popen - `_ - :return: a 2-tuple containing ``stdout`` and ``stderr`` of the process - :rtype: tuple - :raise: `FFRuntimeError` in case FFmpeg command exits with a non-zero code; - `FFExecutableNotFoundError` in case the executable path passed was not valid - """ - try: - self.process = subprocess.Popen( - self._cmd, - stdin=subprocess.PIPE, - stdout=stdout, - stderr=stderr, - env=env, - **kwargs - ) - except OSError as e: - if e.errno == errno.ENOENT: - raise FFExecutableNotFoundError( - "Executable '{0}' not found".format(self.executable) - ) - else: - raise - - out = self.process.communicate(input=input_data) - if self.process.returncode != 0: - raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1]) - - return out - - -class FFprobe(FFmpeg): - """Wrapper for `ffprobe `_.""" - - def __init__(self, executable="ffprobe", global_options="", inputs=None): - """Create an instance of FFprobe. - - Compiles FFprobe command line from passed arguments (executable path, options, inputs). - FFprobe executable by default is taken from ``PATH`` but can be overridden with an - absolute path. For more info about FFprobe command line format see - `here `_. - - :param str executable: absolute path to ffprobe executable - :param iterable global_options: global options passed to ffmpeg executable; can be specified - either as a list/tuple of strings or a space-separated string - :param dict inputs: a dictionary specifying one or more inputs as keys with their - corresponding options as values - """ - super(FFprobe, self).__init__( - executable=executable, global_options=global_options, inputs=inputs - ) - - -class FFExecutableNotFoundError(Exception): - """Raise when FFmpeg/FFprobe executable was not found.""" - - -class FFRuntimeError(Exception): - """Raise when FFmpeg/FFprobe command line execution returns a non-zero exit code. - - The resulting exception object will contain the attributes relates to command line execution: - ``cmd``, ``exit_code``, ``stdout``, ``stderr``. - """ - - def __init__(self, cmd, exit_code, stdout, stderr): - self.cmd = cmd - self.exit_code = exit_code - self.stdout = stdout - self.stderr = stderr - - message = "`{0}` exited with status {1}\n\nSTDOUT:\n{2}\n\nSTDERR:\n{3}".format( - self.cmd, exit_code, (stdout or b"").decode(), (stderr or b"").decode() - ) - - super(FFRuntimeError, self).__init__(message) - - -def _is_sequence(obj): - """Check if the object is a sequence (list, tuple etc.). - - :param object obj: an object to be checked - :return: True if the object is iterable but is not a string, False otherwise - :rtype: bool - """ - return hasattr(obj, "__iter__") and not isinstance(obj, str) - - -def _merge_args_opts(args_opts_dict, **kwargs): - """Merge options with their corresponding arguments. - - Iterates over the dictionary holding arguments (keys) and options (values). Merges each - options string with its corresponding argument. 
- - :param dict args_opts_dict: a dictionary of arguments and options - :param dict kwargs: *input_option* - if specified prepends ``-i`` to input argument - :return: merged list of strings with arguments and their corresponding options - :rtype: list - """ - merged = [] - - if not args_opts_dict: - return merged - - for arg, opt in args_opts_dict.items(): - if not _is_sequence(opt): - opt = shlex.split(opt or "") - merged += opt - - if not arg: - continue - - if "add_input_option" in kwargs: - merged.append("-i") - - merged.append(arg) - - return merged diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/opensearch.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/opensearch.py deleted file mode 100644 index ad57c6aca1d7192f9eba665bb5cf908373ea9bae..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/vector_stores/opensearch.py +++ /dev/null @@ -1,204 +0,0 @@ -"""Elasticsearch/Opensearch vector store.""" -import json -from typing import Any, Dict, List, Optional - -from gpt_index.data_structs import Node -from gpt_index.vector_stores.types import ( - NodeEmbeddingResult, - VectorStore, - VectorStoreQueryResult, -) - - -class OpensearchVectorClient: - """Object encapsulating an Opensearch index that has vector search enabled. - - If the index does not yet exist, it is created during init. - Therefore, the underlying index is assumed to either: - 1) not exist yet or 2) be created due to previous usage of this class. - - Args: - endpoint (str): URL (http/https) of elasticsearch endpoint - index (str): Name of the elasticsearch index - dim (int): Dimension of the vector - embedding_field (str): Name of the field in the index to store - embedding array in. - text_field (str): Name of the field to grab text from - method (Optional[dict]): Opensearch "method" JSON obj for configuring - the KNN index. - This includes engine, metric, and other config params. 
Defaults to: - {"name": "hnsw", "space_type": "l2", "engine": "faiss", - "parameters": {"ef_construction": 256, "m": 48}} - - """ - - def __init__( - self, - endpoint: str, - index: str, - dim: int, - embedding_field: str = "embedding", - text_field: str = "content", - method: Optional[dict] = None, - ): - """Init params.""" - if method is None: - method = { - "name": "hnsw", - "space_type": "l2", - "engine": "nmslib", - "parameters": {"ef_construction": 256, "m": 48}, - } - import_err_msg = "`httpx` package not found, please run `pip install httpx`" - if embedding_field is None: - embedding_field = "embedding" - try: - import httpx # noqa: F401 - except ImportError: - raise ImportError(import_err_msg) - self._embedding_field = embedding_field - self._client = httpx.Client(base_url=endpoint) - self._endpoint = endpoint - self._dim = dim - self._index = index - self._text_field = text_field - # initialize mapping - idx_conf = { - "settings": {"index": {"knn": True, "knn.algo_param.ef_search": 100}}, - "mappings": { - "properties": { - embedding_field: { - "type": "knn_vector", - "dimension": dim, - "method": method, - }, - } - }, - } - res = self._client.put(f"/{self._index}", json=idx_conf) - # will 400 if the index already existed, so allow 400 errors right here - assert res.status_code == 200 or res.status_code == 400 - - def index_results(self, results: List[NodeEmbeddingResult]) -> List[str]: - """Store results in the index.""" - bulk_req: List[Dict[Any, Any]] = [] - for result in results: - bulk_req.append({"index": {"_index": self._index, "_id": result.id}}) - bulk_req.append( - { - self._text_field: result.node.text, - self._embedding_field: result.embedding, - } - ) - bulk = "\n".join([json.dumps(v) for v in bulk_req]) + "\n" - res = self._client.post( - "/_bulk", headers={"Content-Type": "application/x-ndjson"}, content=bulk - ) - assert res.status_code == 200 - assert not res.json()["errors"], "expected no errors while indexing docs" - return [r.id for r in results] - - def delete_doc_id(self, doc_id: str) -> None: - """Delete a document. - - Args: - doc_id (str): document id - """ - self._client.delete(f"{self._index}/_doc/{doc_id}") - - def do_approx_knn( - self, query_embedding: List[float], k: int - ) -> VectorStoreQueryResult: - """Do approximate knn.""" - res = self._client.post( - f"{self._index}/_search", - json={ - "size": k, - "query": { - "knn": {self._embedding_field: {"vector": query_embedding, "k": k}} - }, - }, - ) - nodes = [] - ids = [] - scores = [] - for hit in res.json()["hits"]["hits"]: - source = hit["_source"] - text = source[self._text_field] - doc_id = hit["_id"] - node = Node(text=text, extra_info=source, doc_id=doc_id) - ids.append(doc_id) - nodes.append(node) - scores.append(hit["_score"]) - return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores) - - -class OpensearchVectorStore(VectorStore): - """Elasticsearch/Opensearch vector store. - - Args: - client (OpensearchVectorClient): Vector index client to use - for data insertion/querying. 
- - """ - - stores_text: bool = True - - def __init__( - self, - client: OpensearchVectorClient, - ) -> None: - """Initialize params.""" - import_err_msg = "`httpx` package not found, please run `pip install httpx`" - try: - import httpx # noqa: F401 - except ImportError: - raise ImportError(import_err_msg) - self._client = client - - @property - def client(self) -> Any: - """Get client.""" - return self._client - - @property - def config_dict(self) -> dict: - """Get config dict.""" - return {} - - def add( - self, - embedding_results: List[NodeEmbeddingResult], - ) -> List[str]: - """Add embedding results to index. - - Args - embedding_results: List[NodeEmbeddingResult]: list of embedding results - - """ - self._client.index_results(embedding_results) - return [result.id for result in embedding_results] - - def delete(self, doc_id: str, **delete_kwargs: Any) -> None: - """Delete a document. - - Args: - doc_id (str): document id - - """ - self._client.delete_doc_id(doc_id) - - def query( - self, - query_embedding: List[float], - similarity_top_k: int, - doc_ids: Optional[List[str]] = None, - ) -> VectorStoreQueryResult: - """Query index for top k most similar nodes. - - Args: - query_embedding (List[float]): query embedding - similarity_top_k (int): top k most similar nodes - - """ - return self._client.do_approx_knn(query_embedding, similarity_top_k) diff --git a/spaces/jone/Music_Source_Separation/scripts/0_download_datasets/instruments.sh b/spaces/jone/Music_Source_Separation/scripts/0_download_datasets/instruments.sh deleted file mode 100644 index a848adbe45957923c47bc3047c33958a1421c8f6..0000000000000000000000000000000000000000 --- a/spaces/jone/Music_Source_Separation/scripts/0_download_datasets/instruments.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -echo "The dataset link is created internally by kqq" - -# The downloaded MAESTRO dataset looks like: -# ./datasets/instruments -# ├── violin_solo -# │ └── v0.1 -# │ ├── mp3s (12 files) -# │ │ ├── 0jXXWBt5URw.mp3 -# │ │ └── ... -# │ ├── README.txt -# │ └── validation.csv -# ├── basson_solo -# │ └── ... -# ├── cello_solo -# │ └── ... -# ├── clarinet_solo -# │ └── ... -# ├── flute_solo -# │ └── ... -# ├── harp_solo -# │ └── ... -# ├── horn_solo -# │ └── ... -# ├── oboe_solo -# │ └── ... -# ├── saxophone_solo -# │ └── ... -# ├── string_quartet -# │ └── ... -# ├── symphony_solo -# │ └── ... -# ├── timpani_solo -# │ └── ... -# ├── trombone_solo -# │ └── ... -# ├── trumpet_solo -# │ └── ... -# ├── tuba_solo -# │ └── ... -# └── viola_solo -# └── ... \ No newline at end of file diff --git a/spaces/jpfearnworks/ai_agents/modules/llm/defaults.py b/spaces/jpfearnworks/ai_agents/modules/llm/defaults.py deleted file mode 100644 index 87db2f0c04fe1c673afdd3d95c355bbe9d0ed8ab..0000000000000000000000000000000000000000 --- a/spaces/jpfearnworks/ai_agents/modules/llm/defaults.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -from langchain import OpenAI -from langchain.chat_models import ChatOpenAI -OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY') - - - -def get_default_cloud_chat_llm(): - """ - Returns a default LLM instance with the OpenAI API key set in the environment. - - Returns: - OpenAI: A new OpenAI instance. - """ - llm = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key=OPENAI_API_KEY, temperature=0) - return llm - -def get_default_cloud_completion_llm(): - """ - Returns a default LLM instance with the OpenAI API key set in the environment. - - Returns: - OpenAI: A new OpenAI instance. 
- """ - llm = OpenAI(openai_api_key=OPENAI_API_KEY) - return llm - -def get_default_local_llm(): - """ - Coming soon! - """ - pass \ No newline at end of file diff --git a/spaces/julien-c/push-model-from-web/README.md b/spaces/julien-c/push-model-from-web/README.md deleted file mode 100644 index dc7dc082249979dd3953e92fb3e03b34f14b6afb..0000000000000000000000000000000000000000 --- a/spaces/julien-c/push-model-from-web/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Push Model From Web -emoji: 🌎 -colorFrom: green -colorTo: green -sdk: static -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kdrkdrkdr/YuukaTTS/monotonic_align/__init__.py b/spaces/kdrkdrkdr/YuukaTTS/monotonic_align/__init__.py deleted file mode 100644 index 40b6f64aa116c74cac2f6a33444c9eeea2fdb38c..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/YuukaTTS/monotonic_align/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) - diff --git a/spaces/keras-io/bert-semantic-similarity/README.md b/spaces/keras-io/bert-semantic-similarity/README.md deleted file mode 100644 index 2a04eb1e8bafa81ea45305e3d34b0b15369ece13..0000000000000000000000000000000000000000 --- a/spaces/keras-io/bert-semantic-similarity/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Semantic Similarity with BERT -emoji: 🌇🌆 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keremberke/blood-cell-object-detection/app.py b/spaces/keremberke/blood-cell-object-detection/app.py deleted file mode 100644 index f1e71e99fd806d0a3359ac71fa3f3bc123a3b669..0000000000000000000000000000000000000000 --- a/spaces/keremberke/blood-cell-object-detection/app.py +++ /dev/null @@ -1,53 +0,0 @@ - -import json -import gradio as gr -import yolov5 -from PIL import Image -from huggingface_hub import hf_hub_download - -app_title = "Blood Cell Object Detection" -models_ids = ['keremberke/yolov5n-blood-cell', 'keremberke/yolov5s-blood-cell', 'keremberke/yolov5m-blood-cell'] -article = f"

        model | dataset | awesome-yolov5-models

        " - -current_model_id = models_ids[-1] -model = yolov5.load(current_model_id) - -examples = [['test_images/BloodImage_00004_jpg.rf.32f80737b874b0728582d77e7c409dd5.jpg', 0.25, 'keremberke/yolov5m-blood-cell'], ['test_images/BloodImage_00071_jpg.rf.4eaf043df89d110a17821cd2739cf9c8.jpg', 0.25, 'keremberke/yolov5m-blood-cell'], ['test_images/BloodImage_00182_jpg.rf.166c2fcd2f192794d6b68051171fe261.jpg', 0.25, 'keremberke/yolov5m-blood-cell'], ['test_images/BloodImage_00259_jpg.rf.fbe6e4480e60c75a0f01ad7b8b367262.jpg', 0.25, 'keremberke/yolov5m-blood-cell'], ['test_images/BloodImage_00274_jpg.rf.86d08e08eb6ca331175699cc1ef1ce07.jpg', 0.25, 'keremberke/yolov5m-blood-cell'], ['test_images/BloodImage_00296_jpg.rf.6a50b9decfd0cde034af85c72b5f2c9c.jpg', 0.25, 'keremberke/yolov5m-blood-cell']] - - -def predict(image, threshold=0.25, model_id=None): - # update model if required - global current_model_id - global model - if model_id != current_model_id: - model = yolov5.load(model_id) - current_model_id = model_id - - # get model input size - config_path = hf_hub_download(repo_id=model_id, filename="config.json") - with open(config_path, "r") as f: - config = json.load(f) - input_size = config["input_size"] - - # perform inference - model.conf = threshold - results = model(image, size=input_size) - numpy_image = results.render()[0] - output_image = Image.fromarray(numpy_image) - return output_image - - -gr.Interface( - title=app_title, - description="Created by 'keremberke'", - article=article, - fn=predict, - inputs=[ - gr.Image(type="pil"), - gr.Slider(maximum=1, step=0.01, value=0.25), - gr.Dropdown(models_ids, value=models_ids[-1]), - ], - outputs=gr.Image(type="pil"), - examples=examples, - cache_examples=True if examples else False, -).launch(enable_queue=True) diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/__init__.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/__init__.py deleted file mode 100644 index 55bd4c5d1889a1a998b52eb56793bbc1eef1b691..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) - elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - return iresnet2060(False, **kwargs) - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - else: - raise ValueError() \ No newline at end of file diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/__init__.py b/spaces/kevinwang676/SadTalker/src/face3d/models/__init__.py deleted file mode 100644 index 5a7986c7ad2ec48f404adf81fea5aa06aaf1eeb4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/models/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -"""This package contains modules related to objective functions, optimizations, and network architectures. 
- -To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. -You need to implement the following five functions: - -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). - -- : unpack data from dataset and apply preprocessing. - -- : produce intermediate results. - -- : calculate loss, gradients, and update network weights. - -- : (optionally) add model-specific options and set default options. - -In the function <__init__>, you need to define four lists: - -- self.loss_names (str list): specify the training losses that you want to plot and save. - -- self.model_names (str list): define networks used in our training. - -- self.visual_names (str list): specify the images that you want to display and save. - -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. - -Now you can use the model class by specifying flag '--model dummy'. -See our template model class 'template_model.py' for more details. -""" - -import importlib -from src.face3d.models.base_model import BaseModel - - -def find_model_using_name(model_name): - """Import the module "models/[model_name]_model.py". - - In the file, the class called DatasetNameModel() will - be instantiated. It has to be a subclass of BaseModel, - and it is case-insensitive. - """ - model_filename = "face3d.models." + model_name + "_model" - modellib = importlib.import_module(model_filename) - model = None - target_model_name = model_name.replace('_', '') + 'model' - for name, cls in modellib.__dict__.items(): - if name.lower() == target_model_name.lower() \ - and issubclass(cls, BaseModel): - model = cls - - if model is None: - print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) - exit(0) - - return model - - -def get_option_setter(model_name): - """Return the static method of the model class.""" - model_class = find_model_using_name(model_name) - return model_class.modify_commandline_options - - -def create_model(opt): - """Create a model given the option. - - This function warps the class CustomDatasetDataLoader. 
- This is the main interface between this package and 'train.py'/'test.py' - - Example: - >>> from models import create_model - >>> model = create_model(opt) - """ - model = find_model_using_name(opt.model) - instance = model(opt) - print("model [%s] was created" % type(instance).__name__) - return instance diff --git a/spaces/kevinwang676/VoiceChanger/infer_pack/models_onnx.py b/spaces/kevinwang676/VoiceChanger/infer_pack/models_onnx.py deleted file mode 100644 index b945eac8e59aac38fbd166da49eda01e2b8f4bd4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/infer_pack/models_onnx.py +++ /dev/null @@ -1,818 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, 
h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, 
padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" 
- ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - 
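# (Aside: a hedged sanity check of the stride arithmetic used above, assuming
# an illustrative upsample_rates = [10, 6, 2, 2]. After upsampling stage i, the
# harmonic source must be downsampled by the product of the remaining rates so
# both tensors share one time resolution; upp is the total samples-per-frame.)
rates = [10, 6, 2, 2]
upp = int(np.prod(rates))                                            # 240
strides_f0 = [int(np.prod(rates[i + 1:])) for i in range(len(rates) - 1)]
assert strides_f0 == [24, 4, 2]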
self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if self.gin_channels == 256: - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: 
# [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/logging.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/logging.py deleted file mode 100644 index 4aa0e04bb9b3ab2a4bfbc4def50404ccbac2c6e6..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/logging.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -import torch.distributed as dist - -logger_initialized = {} - - -def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): - """Initialize and get a logger by name. - - If the logger has not been initialized, this method will initialize the - logger by adding one or two handlers, otherwise the initialized logger will - be directly returned. During initialization, a StreamHandler will always be - added. If `log_file` is specified and the process rank is 0, a FileHandler - will also be added. - - Args: - name (str): Logger name. - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the logger. - log_level (int): The logger level. Note that only the process of - rank 0 is affected, and other processes will set the level to - "Error" thus be silent most of the time. - file_mode (str): The file mode used in opening log file. - Defaults to 'w'. - - Returns: - logging.Logger: The expected logger. - """ - logger = logging.getLogger(name) - if name in logger_initialized: - return logger - # handle hierarchical names - # e.g., logger "a" is initialized, then logger "a.b" will skip the - # initialization since it is a child of "a". - for logger_name in logger_initialized: - if name.startswith(logger_name): - return logger - - # handle duplicate logs to the console - # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) - # to the root logger. As logger.propagate is True by default, this root - # level handler causes logging messages from rank>0 processes to - # unexpectedly show up on the console, creating much unwanted clutter. - # To fix this issue, we set the root logger's StreamHandler, if any, to log - # at the ERROR level. 
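- # (Editor's note:) the loop below applies that fix; further down, the rank
- # is resolved via torch.distributed, and only rank 0 keeps console output
- # at log_level (plus a FileHandler when log_file is given). All other
- # ranks are forced to ERROR.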
- for handler in logger.root.handlers: - if type(handler) is logging.StreamHandler: - handler.setLevel(logging.ERROR) - - stream_handler = logging.StreamHandler() - handlers = [stream_handler] - - if dist.is_available() and dist.is_initialized(): - rank = dist.get_rank() - else: - rank = 0 - - # only rank 0 will add a FileHandler - if rank == 0 and log_file is not None: - # Here, the default behaviour of the official logger is 'a'. Thus, we - # provide an interface to change the file mode to the default - # behaviour. - file_handler = logging.FileHandler(log_file, file_mode) - handlers.append(file_handler) - - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - for handler in handlers: - handler.setFormatter(formatter) - handler.setLevel(log_level) - logger.addHandler(handler) - - if rank == 0: - logger.setLevel(log_level) - else: - logger.setLevel(logging.ERROR) - - logger_initialized[name] = True - - return logger - - -def print_log(msg, logger=None, level=logging.INFO): - """Print a log message. - - Args: - msg (str): The message to be logged. - logger (logging.Logger | str | None): The logger to be used. - Some special loggers are: - - "silent": no message will be printed. - - other str: the logger obtained with `get_root_logger(logger)`. - - None: The `print()` method will be used to print log messages. - level (int): Logging level. Only available when `logger` is a Logger - object or "root". - """ - if logger is None: - print(msg) - elif isinstance(logger, logging.Logger): - logger.log(level, msg) - elif logger == 'silent': - pass - elif isinstance(logger, str): - _logger = get_logger(logger) - _logger.log(level, msg) - else: - raise TypeError( - 'logger should be either a logging.Logger object, str, ' - f'"silent" or None, but got {type(logger)}') diff --git a/spaces/krushna/Auto_Insta_Post-V2/README.md b/spaces/krushna/Auto_Insta_Post-V2/README.md deleted file mode 100644 index 36d9363a3e527127997f4c2bc79cc23b2cfcf7cb..0000000000000000000000000000000000000000 --- a/spaces/krushna/Auto_Insta_Post-V2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Auto Insta Post-V2 -emoji: 💻 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kukuhtw/VToonify/vtoonify/style_transfer.py b/spaces/kukuhtw/VToonify/vtoonify/style_transfer.py deleted file mode 100644 index 3e6ba13ca84dc595dfa9eb9ef85a638889d8cdd3..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/VToonify/vtoonify/style_transfer.py +++ /dev/null @@ -1,232 +0,0 @@ -import os -#os.environ['CUDA_VISIBLE_DEVICES'] = "0" -import argparse -import numpy as np -import cv2 -import dlib -import torch -from torchvision import transforms -import torch.nn.functional as F -from tqdm import tqdm -from model.vtoonify import VToonify -from model.bisenet.model import BiSeNet -from model.encoder.align_all_parallel import align_face -from util import save_image, load_image, visualize, load_psp_standalone, get_video_crop_parameter, tensor2cv2 - - -class TestOptions(): - def __init__(self): - - self.parser = argparse.ArgumentParser(description="Style Transfer") - self.parser.add_argument("--content", type=str, default='./data/077436.jpg', help="path of the content image/video") - self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image") - 
self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D") - self.parser.add_argument("--color_transfer", action="store_true", help="transfer the color of the style") - self.parser.add_argument("--ckpt", type=str, default='./checkpoint/vtoonify_d_cartoon/vtoonify_s_d.pt', help="path of the saved model") - self.parser.add_argument("--output_path", type=str, default='./output/', help="path of the output images") - self.parser.add_argument("--scale_image", action="store_true", help="resize and crop the image to best fit the model") - self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder") - self.parser.add_argument("--exstyle_path", type=str, default=None, help="path of the extrinsic style code") - self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") - self.parser.add_argument("--video", action="store_true", help="if true, video stylization; if false, image stylization") - self.parser.add_argument("--cpu", action="store_true", help="if true, only use cpu") - self.parser.add_argument("--backbone", type=str, default='dualstylegan', help="dualstylegan | toonify") - self.parser.add_argument("--padding", type=int, nargs=4, default=[200,200,200,200], help="left, right, top, bottom paddings to the face center") - self.parser.add_argument("--batch_size", type=int, default=4, help="batch size of frames when processing video") - self.parser.add_argument("--parsing_map_path", type=str, default=None, help="path of the refined parsing map of the target video") - - def parse(self): - self.opt = self.parser.parse_args() - if self.opt.exstyle_path is None: - self.opt.exstyle_path = os.path.join(os.path.dirname(self.opt.ckpt), 'exstyle_code.npy') - args = vars(self.opt) - print('Load options') - for name, value in sorted(args.items()): - print('%s: %s' % (str(name), str(value))) - return self.opt - -if __name__ == "__main__": - - parser = TestOptions() - args = parser.parse() - print('*'*98) - - - device = "cpu" if args.cpu else "cuda" - - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - - vtoonify = VToonify(backbone = args.backbone) - vtoonify.load_state_dict(torch.load(args.ckpt, map_location=lambda storage, loc: storage)['g_ema']) - vtoonify.to(device) - - parsingpredictor = BiSeNet(n_classes=19) - parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) - parsingpredictor.to(device).eval() - - modelname = './checkpoint/shape_predictor_68_face_landmarks.dat' - if not os.path.exists(modelname): - import wget, bz2 - wget.download('http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2', modelname+'.bz2') - zipfile = bz2.BZ2File(modelname+'.bz2') - data = zipfile.read() - open(modelname, 'wb').write(data) - landmarkpredictor = dlib.shape_predictor(modelname) - - pspencoder = load_psp_standalone(args.style_encoder_path, device) - - if args.backbone == 'dualstylegan': - exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item() - stylename = list(exstyles.keys())[args.style_id] - exstyle = torch.tensor(exstyles[stylename]).to(device) - with torch.no_grad(): - exstyle = vtoonify.zplus2wplus(exstyle) - - if args.video and args.parsing_map_path is not None: - x_p_hat = torch.tensor(np.load(args.parsing_map_path)) - - print('Load models successfully!') - - - filename = 
args.content - basename = os.path.basename(filename).split('.')[0] - scale = 1 - kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]]) - print('Processing ' + os.path.basename(filename) + ' with vtoonify_' + args.backbone[0]) - if args.video: - cropname = os.path.join(args.output_path, basename + '_input.mp4') - savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.mp4') - - video_cap = cv2.VideoCapture(filename) - num = int(video_cap.get(7)) - - first_valid_frame = True - batch_frames = [] - for i in tqdm(range(num)): - success, frame = video_cap.read() - if success == False: - assert('load video frames error') - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - # We proprocess the video by detecting the face in the first frame, - # and resizing the frame so that the eye distance is 64 pixels. - # Centered on the eyes, we crop the first frame to almost 400x400 (based on args.padding). - # All other frames use the same resizing and cropping parameters as the first frame. - if first_valid_frame: - if args.scale_image: - paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding) - if paras is None: - continue - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - # for HR video, we apply gaussian blur to the frames to avoid flickers caused by bilinear downsampling - # this can also prevent over-sharp stylization results. - if scale <= 0.75: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - if scale <= 0.375: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - else: - H, W = frame.shape[0], frame.shape[1] - - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter(cropname, fourcc, video_cap.get(5), (W, H)) - videoWriter2 = cv2.VideoWriter(savename, fourcc, video_cap.get(5), (4*W, 4*H)) - - # For each video, we detect and align the face in the first frame for pSp to obtain the style code. - # This style code is used for all other frames. 
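- # (Editor's note, an assumption about the pipeline:) align_face returns an
- # aligned face crop; pspencoder embeds it into a W+ style code s_w, and
- # zplus2wplus maps the Z+ exstyle code into the same W+ space so that the
- # first 7 style layers can be swapped below when --color_transfer is off.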
- with torch.no_grad(): - I = align_face(frame, landmarkpredictor) - I = transform(I).unsqueeze(dim=0).to(device) - s_w = pspencoder(I) - s_w = vtoonify.zplus2wplus(s_w) - if vtoonify.backbone == 'dualstylegan': - if args.color_transfer: - s_w = exstyle - else: - s_w[:,:7] = exstyle[:,:7] - first_valid_frame = False - elif args.scale_image: - if scale <= 0.75: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - if scale <= 0.375: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - - videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) - - batch_frames += [transform(frame).unsqueeze(dim=0).to(device)] - - if len(batch_frames) == args.batch_size or (i+1) == num: - x = torch.cat(batch_frames, dim=0) - batch_frames = [] - with torch.no_grad(): - # parsing network works best on 512x512 images, so we predict parsing maps on upsmapled frames - # followed by downsampling the parsing maps - if args.video and args.parsing_map_path is not None: - x_p = x_p_hat[i+1-x.size(0):i+1].to(device) - else: - x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], - scale_factor=0.5, recompute_scale_factor=False).detach() - # we give parsing maps lower weight (1/16) - inputs = torch.cat((x, x_p/16.), dim=1) - # d_s has no effect when backbone is toonify - y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree) - y_tilde = torch.clamp(y_tilde, -1, 1) - for k in range(y_tilde.size(0)): - videoWriter2.write(tensor2cv2(y_tilde[k].cpu())) - - videoWriter.release() - videoWriter2.release() - video_cap.release() - - - else: - cropname = os.path.join(args.output_path, basename + '_input.jpg') - savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.jpg') - - frame = cv2.imread(filename) - frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - - # We detect the face in the image, and resize the image so that the eye distance is 64 pixels. - # Centered on the eyes, we crop the image to almost 400x400 (based on args.padding). 
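- # (Editor's note:) kernel_1d above is a 4-tap binomial filter; for scale
- # factors <= 0.75 the code below runs one (or, below 0.375, two) separable
- # blur passes before cv2.resize as cheap anti-aliasing, which also avoids
- # over-sharp stylization.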
- if args.scale_image: - paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding) - if paras is not None: - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - # for HR image, we apply gaussian blur to it to avoid over-sharp stylization results - if scale <= 0.75: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - if scale <= 0.375: - frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - - with torch.no_grad(): - I = align_face(frame, landmarkpredictor) - I = transform(I).unsqueeze(dim=0).to(device) - s_w = pspencoder(I) - s_w = vtoonify.zplus2wplus(s_w) - if vtoonify.backbone == 'dualstylegan': - if args.color_transfer: - s_w = exstyle - else: - s_w[:,:7] = exstyle[:,:7] - - x = transform(frame).unsqueeze(dim=0).to(device) - # parsing network works best on 512x512 images, so we predict parsing maps on upsmapled frames - # followed by downsampling the parsing maps - x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0], - scale_factor=0.5, recompute_scale_factor=False).detach() - # we give parsing maps lower weight (1/16) - inputs = torch.cat((x, x_p/16.), dim=1) - # d_s has no effect when backbone is toonify - y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree) - y_tilde = torch.clamp(y_tilde, -1, 1) - - cv2.imwrite(cropname, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)) - save_image(y_tilde[0].cpu(), savename) - - print('Transfer style successfully!') \ No newline at end of file diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/presets/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/presets/__init__.py deleted file mode 100644 index 16f10e51f13c94b68876d5be5113c79b81e13ac1..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/presets/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -__all__ = ("commonmark", "default", "zero", "js_default", "gfm_like") - -from . import commonmark, default, zero - -js_default = default - - -class gfm_like: - """GitHub Flavoured Markdown (GFM) like. - - This adds the linkify, table and strikethrough components to CommmonMark. - - Note, it lacks task-list items and raw HTML filtering, - to meet the the full GFM specification - (see https://github.github.com/gfm/#autolinks-extension-). 
- """ - - @staticmethod - def make(): - config = commonmark.make() - config["components"]["core"]["rules"].append("linkify") - config["components"]["block"]["rules"].append("table") - config["components"]["inline"]["rules"].append("strikethrough") - config["components"]["inline"]["rules2"].append("strikethrough") - config["options"]["linkify"] = True - config["options"]["html"] = True - return config diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_sr.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_sr.py deleted file mode 100644 index 8e1c11c7bfbd7e4aecd9a9e5b44f73ad4e81bc3e..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_sr.py +++ /dev/null @@ -1,197 +0,0 @@ -import math -import numpy as np -import random -import torch -import torch.utils.data as data -import utils.utils_image as util -from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels -from basicsr.utils import DiffJPEG, USMSharp -from numpy.typing import NDArray -from PIL import Image -from utils.utils_video import img2tensor -from torch import Tensor - -from data.degradations import apply_real_esrgan_degradations - -class DatasetSR(data.Dataset): - ''' - # ----------------------------------------- - # Get L/H for SISR. - # If only "paths_H" is provided, sythesize bicubicly downsampled L on-the-fly. - # ----------------------------------------- - # e.g., SRResNet - # ----------------------------------------- - ''' - - def __init__(self, opt): - super(DatasetSR, self).__init__() - self.opt = opt - self.n_channels = opt['n_channels'] if opt['n_channels'] else 3 - self.sf = opt['scale'] if opt['scale'] else 4 - self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96 - self.L_size = self.patch_size // self.sf - - # ------------------------------------ - # get paths of L/H - # ------------------------------------ - self.paths_H = util.get_image_paths(opt['dataroot_H']) - self.paths_L = util.get_image_paths(opt['dataroot_L']) - - assert self.paths_H, 'Error: H path is empty.' 
- if self.paths_L and self.paths_H: - assert len(self.paths_L) == len(self.paths_H), 'L/H mismatch - {}, {}.'.format(len(self.paths_L), len(self.paths_H)) - - self.jpeg_simulator = DiffJPEG() - self.usm_sharpener = USMSharp() - - blur_kernel_list1 = ['iso', 'aniso', 'generalized_iso', - 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] - blur_kernel_list2 = ['iso', 'aniso', 'generalized_iso', - 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] - blur_kernel_prob1 = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] - blur_kernel_prob2 = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] - kernel_size = 21 - blur_sigma1 = [0.05, 0.2] - blur_sigma2 = [0.05, 0.1] - betag_range1 = [0.7, 1.3] - betag_range2 = [0.7, 1.3] - betap_range1 = [0.7, 1.3] - betap_range2 = [0.7, 1.3] - - def _decide_kernels(self) -> NDArray: - blur_kernel1 = random_mixed_kernels( - self.blur_kernel_list1, - self.blur_kernel_prob1, - self.kernel_size, - self.blur_sigma1, - self.blur_sigma1, [-math.pi, math.pi], - self.betag_range1, - self.betap_range1, - noise_range=None - ) - blur_kernel2 = random_mixed_kernels( - self.blur_kernel_list2, - self.blur_kernel_prob2, - self.kernel_size, - self.blur_sigma2, - self.blur_sigma2, [-math.pi, math.pi], - self.betag_range2, - self.betap_range2, - noise_range=None - ) - if self.kernel_size < 13: - omega_c = np.random.uniform(np.pi / 3, np.pi) - else: - omega_c = np.random.uniform(np.pi / 5, np.pi) - sinc_kernel = circular_lowpass_kernel(omega_c, self.kernel_size, pad_to=21) - return (blur_kernel1, blur_kernel2, sinc_kernel) - - def __getitem__(self, index): - - L_path = None - # ------------------------------------ - # get H image - # ------------------------------------ - H_path = self.paths_H[index] - img_H = util.imread_uint(H_path, self.n_channels) - img_H = util.uint2single(img_H) - - # ------------------------------------ - # modcrop - # ------------------------------------ - img_H = util.modcrop(img_H, self.sf) - - # ------------------------------------ - # get L image - # ------------------------------------ - if self.paths_L: - # -------------------------------- - # directly load L image - # -------------------------------- - L_path = self.paths_L[index] - img_L = util.imread_uint(L_path, self.n_channels) - img_L = util.uint2single(img_L) - - else: - # -------------------------------- - # sythesize L image via matlab's bicubic - # -------------------------------- - H, W = img_H.shape[:2] - img_L = util.imresize_np(img_H, 1 / self.sf, True) - - src_tensor = img2tensor(img_L.copy(), bgr2rgb=False, - float32=True).unsqueeze(0) - - blur_kernel1, blur_kernel2, sinc_kernel = self._decide_kernels() - (img_L_2, sharp_img_L, degraded_img_L) = apply_real_esrgan_degradations( - src_tensor, - blur_kernel1=Tensor(blur_kernel1).unsqueeze(0), - blur_kernel2=Tensor(blur_kernel2).unsqueeze(0), - second_blur_prob=0.2, - sinc_kernel=Tensor(sinc_kernel).unsqueeze(0), - resize_prob1=[0.2, 0.7, 0.1], - resize_prob2=[0.3, 0.4, 0.3], - resize_range1=[0.9, 1.1], - resize_range2=[0.9, 1.1], - gray_noise_prob1=0.2, - gray_noise_prob2=0.2, - gaussian_noise_prob1=0.2, - gaussian_noise_prob2=0.2, - noise_range=[0.01, 0.2], - poisson_scale_range=[0.05, 0.45], - jpeg_compression_range1=[85, 100], - jpeg_compression_range2=[85, 100], - jpeg_simulator=self.jpeg_simulator, - random_crop_gt_size=256, - sr_upsample_scale=1, - usm_sharpener=self.usm_sharpener - ) - # Image.fromarray((degraded_img_L[0] * 255).permute( - # 1, 2, 0).cpu().numpy().astype(np.uint8)).save( - # "/home/cll/Desktop/degraded_L.png") - # 
Image.fromarray((img_L * 255).astype(np.uint8)).save( - # "/home/cll/Desktop/img_L.png") - # Image.fromarray((img_L_2[0] * 255).permute( - # 1, 2, 0).cpu().numpy().astype(np.uint8)).save( - # "/home/cll/Desktop/img_L_2.png") - # exit() - - # ------------------------------------ - # if train, get L/H patch pair - # ------------------------------------ - if self.opt['phase'] == 'train': - - H, W, C = img_L.shape - - # -------------------------------- - # randomly crop the L patch - # -------------------------------- - rnd_h = random.randint(0, max(0, H - self.L_size)) - rnd_w = random.randint(0, max(0, W - self.L_size)) - img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :] - - # -------------------------------- - # crop corresponding H patch - # -------------------------------- - rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf) - img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :] - - # -------------------------------- - # augmentation - flip and/or rotate + RealESRGAN modified degradations - # -------------------------------- - mode = random.randint(0, 7) - img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode) - - - # ------------------------------------ - # L/H pairs, HWC to CHW, numpy to tensor - # ------------------------------------ - img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L) - - if L_path is None: - L_path = H_path - - return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path} - - def __len__(self): - return len(self.paths_H) diff --git a/spaces/lightli/bingo-newbing/src/components/chat-notification.tsx b/spaces/lightli/bingo-newbing/src/components/chat-notification.tsx deleted file mode 100644 index 4be24d0f1755c8058698cfa66c736d8d4792475a..0000000000000000000000000000000000000000 --- a/spaces/lightli/bingo-newbing/src/components/chat-notification.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if (error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( -
        - You have reached the daily message limit; please switch to another account or try again in a day -

        - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - - Your account has been blacklisted; please try another account or request to be unblocked - - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( -
        - The current topic has ended; click - Restart - to start a new conversation -
        - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - - Click to pass the CAPTCHA verification - - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - No identity information was found or it has expired; click here to set it up again - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( -
        -
        -
        -
        -
        - error - {getAction(message.error, () => bot.resetConversation())} -
        -
        -
        -
        -
        - ) -} diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Kenways Fleet Offline Skidrow Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Kenways Fleet Offline Skidrow Crack.md deleted file mode 100644 index 2865ad98c003e68360bcc331ad119a1cbf2f7a9f..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Kenways Fleet Offline Skidrow Crack.md +++ /dev/null @@ -1,6 +0,0 @@ - -

        . Kenway's Fleet Offline Skidrow Crack. Kenway's Fleet Offline Skidrow Crack. Kenway's Fleet Offline Skidrow Crack. A cracked one ) now im 20 just wonder where to buy it and now here is it!. I could since I can't connect to ubisoft servers for the Kenways Fleet.

        -

        kenway's fleet offline skidrow crack


        Download Zip ✏ ✏ ✏ https://bytlly.com/2uGvWu



        -

        . Kenway's Fleet Offline Skidrow Crack.

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/littlesujin/littlesujin/Dockerfile b/spaces/littlesujin/littlesujin/Dockerfile deleted file mode 100644 index 774ee5eef237fcf56943968ee9cbbadf0ab61585..0000000000000000000000000000000000000000 --- a/spaces/littlesujin/littlesujin/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . - -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="WSs5eA77rfZTFqwaWIO2rB3bW5OGTs7" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/longlian/llm-grounded-diffusion/utils/latents.py b/spaces/longlian/llm-grounded-diffusion/utils/latents.py deleted file mode 100644 index 8f3498c1e074bd0b6b9279d98852836a33b8f984..0000000000000000000000000000000000000000 --- a/spaces/longlian/llm-grounded-diffusion/utils/latents.py +++ /dev/null @@ -1,161 +0,0 @@ -import torch -import numpy as np -from . import utils -from utils import torch_device -import matplotlib.pyplot as plt - -def get_unscaled_latents(batch_size, in_channels, height, width, generator, dtype): - """ - in_channels: often obtained with `unet.config.in_channels` - """ - # Obtain with torch.float32 and cast to float16 if needed - # Directly obtaining latents in float16 will lead to different latents - latents_base = torch.randn( - (batch_size, in_channels, height // 8, width // 8), - generator=generator, dtype=dtype - ).to(torch_device, dtype=dtype) - - return latents_base - -def get_scaled_latents(batch_size, in_channels, height, width, generator, dtype, scheduler): - latents_base = get_unscaled_latents(batch_size, in_channels, height, width, generator, dtype) - latents_base = latents_base * scheduler.init_noise_sigma - return latents_base - -def blend_latents(latents_bg, latents_fg, fg_mask, fg_blending_ratio=0.01): - """ - in_channels: often obtained with `unet.config.in_channels` - """ - assert not torch.allclose(latents_bg, latents_fg), "latents_bg should be independent with latents_fg" - - dtype = latents_bg.dtype - latents = latents_bg * (1. - fg_mask) + (latents_bg * np.sqrt(1. - fg_blending_ratio) + latents_fg * np.sqrt(fg_blending_ratio)) * fg_mask - latents = latents.to(dtype=dtype) - - return latents - -@torch.no_grad() -def compose_latents(model_dict, latents_all_list, mask_tensor_list, num_inference_steps, overall_batch_size, height, width, latents_bg=None, bg_seed=None, compose_box_to_bg=True, use_fast_schedule=False, fast_after_steps=None): - unet, scheduler, dtype = model_dict.unet, model_dict.scheduler, model_dict.dtype - - if latents_bg is None: - generator = torch.manual_seed(bg_seed) # Seed generator to create the inital latent noise - latents_bg = get_scaled_latents(overall_batch_size, unet.config.in_channels, height, width, generator, dtype, scheduler) - - # Other than t=T (idx=0), we only have masked latents. This is to prevent accidentally loading from non-masked part. 
Use same mask as the one used to compose the latents. - if use_fast_schedule: - # If we use fast schedule, we only compose the frozen steps because the later steps do not match. - composed_latents = torch.zeros((fast_after_steps + 1, *latents_bg.shape), dtype=dtype) - else: - # Otherwise we compose all steps so that we don't need to compose again if we change the frozen steps. - composed_latents = torch.zeros((num_inference_steps + 1, *latents_bg.shape), dtype=dtype) - composed_latents[0] = latents_bg - - foreground_indices = torch.zeros(latents_bg.shape[-2:], dtype=torch.long) - - mask_size = np.array([mask_tensor.sum().item() for mask_tensor in mask_tensor_list]) - # Compose the largest mask first - mask_order = np.argsort(-mask_size) - - if compose_box_to_bg: - # This has two functionalities: - # 1. copies the right initial latents from the right place (for centered so generation), 2. copies the right initial latents (since we have foreground blending) for centered/original so generation. - for mask_idx in mask_order: - latents_all, mask_tensor = latents_all_list[mask_idx], mask_tensor_list[mask_idx] - - # Note: need to be careful to not copy from zeros due to shifting. - mask_tensor = utils.binary_mask_to_box_mask(mask_tensor, to_device=False) - - mask_tensor_expanded = mask_tensor[None, None, None, ...].to(dtype) - composed_latents[0] = composed_latents[0] * (1. - mask_tensor_expanded) + latents_all[0] * mask_tensor_expanded - - # This is still needed with `compose_box_to_bg` to ensure the foreground latent is still visible and to compute foreground indices. - for mask_idx in mask_order: - latents_all, mask_tensor = latents_all_list[mask_idx], mask_tensor_list[mask_idx] - foreground_indices = foreground_indices * (~mask_tensor) + (mask_idx + 1) * mask_tensor - mask_tensor_expanded = mask_tensor[None, None, None, ...].to(dtype) - if use_fast_schedule: - composed_latents = composed_latents * (1. - mask_tensor_expanded) + latents_all[:fast_after_steps + 1] * mask_tensor_expanded - else: - composed_latents = composed_latents * (1. - mask_tensor_expanded) + latents_all * mask_tensor_expanded - - composed_latents, foreground_indices = composed_latents.to(torch_device), foreground_indices.to(torch_device) - return composed_latents, foreground_indices - -def align_with_bboxes(latents_all_list, mask_tensor_list, bboxes, horizontal_shift_only=False): - """ - Each offset in `offset_list` is `(x_offset, y_offset)` (normalized). - """ - new_latents_all_list, new_mask_tensor_list, offset_list = [], [], [] - for latents_all, mask_tensor, bbox in zip(latents_all_list, mask_tensor_list, bboxes): - x_src_center, y_src_center = utils.binary_mask_to_center(mask_tensor, normalize=True) - x_min_dest, y_min_dest, x_max_dest, y_max_dest = bbox - x_dest_center, y_dest_center = (x_min_dest + x_max_dest) / 2, (y_min_dest + y_max_dest) / 2 - # print("src (x,y):", x_src_center, y_src_center, "dest (x,y):", x_dest_center, y_dest_center) - x_offset, y_offset = x_dest_center - x_src_center, y_dest_center - y_src_center - if horizontal_shift_only: - y_offset = 0. 
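The centering logic above boils down to a center-difference in normalized coordinates; a worked example with invented numbers:

```python
# Worked example of the normalized offset computation above (numbers invented).
x_src_center, y_src_center = 0.30, 0.50              # mask center in [0, 1]
x_min, y_min, x_max, y_max = 0.55, 0.40, 0.95, 0.80  # target bbox
x_dest_center = (x_min + x_max) / 2                  # 0.75
y_dest_center = (y_min + y_max) / 2                  # 0.60
x_offset = x_dest_center - x_src_center              # about 0.45: shift right
y_offset = y_dest_center - y_src_center              # about 0.10: shift down
# shift_tensor then translates both the latents and the mask by this amount.
```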
- offset = x_offset, y_offset - latents_all = utils.shift_tensor(latents_all, x_offset, y_offset, offset_normalized=True) - mask_tensor = utils.shift_tensor(mask_tensor, x_offset, y_offset, offset_normalized=True) - new_latents_all_list.append(latents_all) - new_mask_tensor_list.append(mask_tensor) - offset_list.append(offset) - - return new_latents_all_list, new_mask_tensor_list, offset_list - -@torch.no_grad() -def compose_latents_with_alignment( - model_dict, latents_all_list, mask_tensor_list, num_inference_steps, overall_batch_size, height, width, - align_with_overall_bboxes=True, overall_bboxes=None, horizontal_shift_only=False, **kwargs -): - if align_with_overall_bboxes and len(latents_all_list): - expanded_overall_bboxes = utils.expand_overall_bboxes(overall_bboxes) - latents_all_list, mask_tensor_list, offset_list = align_with_bboxes(latents_all_list, mask_tensor_list, bboxes=expanded_overall_bboxes, horizontal_shift_only=horizontal_shift_only) - else: - offset_list = [(0., 0.) for _ in range(len(latents_all_list))] - composed_latents, foreground_indices = compose_latents(model_dict, latents_all_list, mask_tensor_list, num_inference_steps, overall_batch_size, height, width, **kwargs) - return composed_latents, foreground_indices, offset_list - -def get_input_latents_list(model_dict, bg_seed, fg_seed_start, fg_blending_ratio, height, width, so_prompt_phrase_box_list=None, so_boxes=None, verbose=False): - """ - Note: the returned input latents are scaled by `scheduler.init_noise_sigma` - """ - unet, scheduler, dtype = model_dict.unet, model_dict.scheduler, model_dict.dtype - - generator_bg = torch.manual_seed(bg_seed) # Seed generator to create the inital latent noise - latents_bg = get_unscaled_latents(batch_size=1, in_channels=unet.config.in_channels, height=height, width=width, generator=generator_bg, dtype=dtype) - - input_latents_list = [] - - if so_boxes is None: - # For compatibility - so_boxes = [item[-1] for item in so_prompt_phrase_box_list] - - # change this changes the foreground initial noise - for idx, obj_box in enumerate(so_boxes): - H, W = height // 8, width // 8 - fg_mask = utils.proportion_to_mask(obj_box, H, W) - - if verbose: - plt.imshow(fg_mask.cpu().numpy()) - plt.show() - - fg_seed = fg_seed_start + idx - if fg_seed == bg_seed: - # We should have different seeds for foreground and background - fg_seed += 12345 - - generator_fg = torch.manual_seed(fg_seed) - latents_fg = get_unscaled_latents(batch_size=1, in_channels=unet.config.in_channels, height=height, width=width, generator=generator_fg, dtype=dtype) - - input_latents = blend_latents(latents_bg, latents_fg, fg_mask, fg_blending_ratio=fg_blending_ratio) - - input_latents = input_latents * scheduler.init_noise_sigma - - input_latents_list.append(input_latents) - - latents_bg = latents_bg * scheduler.init_noise_sigma - - return input_latents_list, latents_bg - diff --git a/spaces/loss4Wang/architecture_styles/app.py b/spaces/loss4Wang/architecture_styles/app.py deleted file mode 100644 index 60a15a7b671d33bd8f32faf1b925df263ecd6ca3..0000000000000000000000000000000000000000 --- a/spaces/loss4Wang/architecture_styles/app.py +++ /dev/null @@ -1,15 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -learn = load_learner('archi_styles_model.pkl') - -styles = ('Baroque Architecture','Gothic Architecture', 'Greek and Roman Classical Architecture', 'Modern Architecture', 'Neoclassical Architecture', 'Neofuturist Architecture', 'Post-Modern Architecture', 'Victorian Architecture') -examples = ['Baroque 
Architecture.jpg', 'Gothic Architecture.jpg', 'Modern Architecture.jpg'] - -# Use dict() and zip() to build a dict, not formaliar with data object in python. -def image_classifier(img): - pred_styles,pred_idx,probs = learn.predict(img) - return dict(zip(styles, map(float, probs))) - -demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", examples = examples) -demo.launch() \ No newline at end of file diff --git a/spaces/ludusc/latent-space-theories/torch_utils/persistence.py b/spaces/ludusc/latent-space-theories/torch_utils/persistence.py deleted file mode 100644 index f90ce85e8ace0f44e839158b22c5790de448d82d..0000000000000000000000000000000000000000 --- a/spaces/ludusc/latent-space-theories/torch_utils/persistence.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Facilities for pickling Python code alongside other data. - -The pickled code is automatically imported into a separate Python module -during unpickling. This way, any previously exported pickles will remain -usable even if the original code is no longer available, or if the current -version of the code is not consistent with what was originally pickled.""" - -import sys -import pickle -import io -import inspect -import copy -import uuid -import types -import dnnlib - -#---------------------------------------------------------------------------- - -_version = 6 # internal version number -_decorators = set() # {decorator_class, ...} -_import_hooks = [] # [hook_function, ...] -_module_to_src_dict = dict() # {module: src, ...} -_src_to_module_dict = dict() # {src: module, ...} - -#---------------------------------------------------------------------------- - -def persistent_class(orig_class): - r"""Class decorator that extends a given class to save its source code - when pickled. - - Example: - - from torch_utils import persistence - - @persistence.persistent_class - class MyNetwork(torch.nn.Module): - def __init__(self, num_inputs, num_outputs): - super().__init__() - self.fc = MyLayer(num_inputs, num_outputs) - ... - - @persistence.persistent_class - class MyLayer(torch.nn.Module): - ... - - When pickled, any instance of `MyNetwork` and `MyLayer` will save its - source code alongside other internal state (e.g., parameters, buffers, - and submodules). This way, any previously exported pickle will remain - usable even if the class definitions have been modified or are no - longer available. - - The decorator saves the source code of the entire Python module - containing the decorated class. It does *not* save the source code of - any imported modules. Thus, the imported modules must be available - during unpickling, also including `torch_utils.persistence` itself. - - It is ok to call functions defined in the same module from the - decorated class. However, if the decorated class depends on other - classes defined in the same module, they must be decorated as well. - This is illustrated in the above example in the case of `MyLayer`. - - It is also possible to employ the decorator just-in-time before - calling the constructor. 
For example: - - cls = MyLayer - if want_to_make_it_persistent: - cls = persistence.persistent_class(cls) - layer = cls(num_inputs, num_outputs) - - As an additional feature, the decorator also keeps track of the - arguments that were used to construct each instance of the decorated - class. The arguments can be queried via `obj.init_args` and - `obj.init_kwargs`, and they are automatically pickled alongside other - object state. A typical use case is to first unpickle a previous - instance of a persistent class, and then upgrade it to use the latest - version of the source code: - - with open('old_pickle.pkl', 'rb') as f: - old_net = pickle.load(f) - new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs) - misc.copy_params_and_buffers(old_net, new_net, require_all=True) - """ - assert isinstance(orig_class, type) - if is_persistent(orig_class): - return orig_class - - assert orig_class.__module__ in sys.modules - orig_module = sys.modules[orig_class.__module__] - orig_module_src = _module_to_src(orig_module) - - class Decorator(orig_class): - _orig_module_src = orig_module_src - _orig_class_name = orig_class.__name__ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._init_args = copy.deepcopy(args) - self._init_kwargs = copy.deepcopy(kwargs) - assert orig_class.__name__ in orig_module.__dict__ - _check_pickleable(self.__reduce__()) - - @property - def init_args(self): - return copy.deepcopy(self._init_args) - - @property - def init_kwargs(self): - return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs)) - - def __reduce__(self): - fields = list(super().__reduce__()) - fields += [None] * max(3 - len(fields), 0) - if fields[0] is not _reconstruct_persistent_obj: - meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2]) - fields[0] = _reconstruct_persistent_obj # reconstruct func - fields[1] = (meta,) # reconstruct args - fields[2] = None # state dict - return tuple(fields) - - Decorator.__name__ = orig_class.__name__ - _decorators.add(Decorator) - return Decorator - -#---------------------------------------------------------------------------- - -def is_persistent(obj): - r"""Test whether the given object or class is persistent, i.e., - whether it will save its source code when pickled. - """ - try: - if obj in _decorators: - return True - except TypeError: - pass - return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck - -#---------------------------------------------------------------------------- - -def import_hook(hook): - r"""Register an import hook that is called whenever a persistent object - is being unpickled. A typical use case is to patch the pickled source - code to avoid errors and inconsistencies when the API of some imported - module has changed. - - The hook should have the following signature: - - hook(meta) -> modified meta - - `meta` is an instance of `dnnlib.EasyDict` with the following fields: - - type: Type of the persistent object, e.g. `'class'`. - version: Internal version number of `torch_utils.persistence`. - module_src Original source code of the Python module. - class_name: Class name in the original Python module. - state: Internal state of the object. - - Example: - - @persistence.import_hook - def wreck_my_network(meta): - if meta.class_name == 'MyNetwork': - print('MyNetwork is being imported. 
I will wreck it!') - meta.module_src = meta.module_src.replace("True", "False") - return meta - """ - assert callable(hook) - _import_hooks.append(hook) - -#---------------------------------------------------------------------------- - -def _reconstruct_persistent_obj(meta): - r"""Hook that is called internally by the `pickle` module to unpickle - a persistent object. - """ - meta = dnnlib.EasyDict(meta) - meta.state = dnnlib.EasyDict(meta.state) - for hook in _import_hooks: - meta = hook(meta) - assert meta is not None - - assert meta.version == _version - module = _src_to_module(meta.module_src) - - assert meta.type == 'class' - orig_class = module.__dict__[meta.class_name] - decorator_class = persistent_class(orig_class) - obj = decorator_class.__new__(decorator_class) - - setstate = getattr(obj, '__setstate__', None) - if callable(setstate): - setstate(meta.state) # pylint: disable=not-callable - else: - obj.__dict__.update(meta.state) - return obj - -#---------------------------------------------------------------------------- - -def _module_to_src(module): - r"""Query the source code of a given Python module. - """ - src = _module_to_src_dict.get(module, None) - if src is None: - src = inspect.getsource(module) - _module_to_src_dict[module] = src - _src_to_module_dict[src] = module - return src - -def _src_to_module(src): - r"""Get or create a Python module for the given source code. - """ - module = _src_to_module_dict.get(src, None) - if module is None: - module_name = "_imported_module_" + uuid.uuid4().hex - module = types.ModuleType(module_name) - sys.modules[module_name] = module - _module_to_src_dict[module] = src - _src_to_module_dict[src] = module - exec(src, module.__dict__) # pylint: disable=exec-used - return module - -#---------------------------------------------------------------------------- - -def _check_pickleable(obj): - r"""Check that the given object is pickleable, raising an exception if - it is not. This function is expected to be considerably more efficient - than actually pickling the object. - """ - def recurse(obj): - if isinstance(obj, (list, tuple, set)): - return [recurse(x) for x in obj] - if isinstance(obj, dict): - return [[recurse(x), recurse(y)] for x, y in obj.items()] - if isinstance(obj, (str, int, float, bool, bytes, bytearray)): - return None # Python primitive types are pickleable. - if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']: - return None # NumPy arrays and PyTorch tensors are pickleable. - if is_persistent(obj): - return None # Persistent objects are pickleable, by virtue of the constructor check. 
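A minimal sketch of the decorator in use, mirroring the docstring above; the class name and layer sizes are invented, and it assumes the code lives in an importable module file, since the module source is captured via `inspect.getsource`:

```python
# Hypothetical example (class name and sizes invented; run from a module file).
import torch

@persistent_class
class TinyNet(torch.nn.Module):
    def __init__(self, n_in, n_out):
        super().__init__()
        self.fc = torch.nn.Linear(n_in, n_out)

net = TinyNet(4, 2)
print(net.init_args)          # (4, 2): constructor args recorded by the decorator
print(dict(net.init_kwargs))  # {}
```

Pickling such an instance then ships the whole module source alongside the weights, which is what `_reconstruct_persistent_obj` above unpacks.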
- return obj - with io.BytesIO() as f: - pickle.dump(recurse(obj), f) - -#---------------------------------------------------------------------------- diff --git a/spaces/m-a-p/MERT-Music-Genre-Tagging-Prediction/README.md b/spaces/m-a-p/MERT-Music-Genre-Tagging-Prediction/README.md deleted file mode 100644 index 060dee938d7f2aa958376b1dc2e3521d73a22e09..0000000000000000000000000000000000000000 --- a/spaces/m-a-p/MERT-Music-Genre-Tagging-Prediction/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Music Tagging -emoji: 💻 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/cpow.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/cpow.h deleted file mode 100644 index 2d6ad051eb18b47cb628a1673e64ba6584d52de8..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/complex/cpow.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * Copyright 2013 Filipe RNC Maia - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -namespace thrust { - -template -__host__ __device__ -complex::type> -pow(const complex& x, const complex& y) -{ - typedef typename detail::promoted_numerical_type::type T; - return exp(log(complex(x)) * complex(y)); -} - -template -__host__ __device__ -complex::type> -pow(const complex& x, const T1& y) -{ - typedef typename detail::promoted_numerical_type::type T; - return exp(log(complex(x)) * T(y)); -} - -template -__host__ __device__ -complex::type> -pow(const T0& x, const complex& y) -{ - typedef typename detail::promoted_numerical_type::type T; - // Find `log` by ADL. - using std::log; - return exp(log(T(x)) * complex(y)); -} - -} // end namespace thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h deleted file mode 100644 index ee4586273566707b780b50a7ba13e2a3a038ac6e..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h
deleted file mode 100644
index ee4586273566707b780b50a7ba13e2a3a038ac6e..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/logical.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special logical functions
-
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/extrema.h b/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/extrema.h
deleted file mode 100644
index e0dd4c042b38bafb42d683e2f4f19bab3678a4b4..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/extrema.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/tbb/detail/execution_policy.h>
-#include <thrust/system/detail/generic/extrema.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace tbb
-{
-namespace detail
-{
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator max_element(execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            BinaryPredicate comp)
-{
-  // tbb prefers generic::max_element to cpp::max_element
-  return thrust::system::detail::generic::max_element(exec, first, last, comp);
-} // end max_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator min_element(execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            BinaryPredicate comp)
-{
-  // tbb prefers generic::min_element to cpp::min_element
-  return thrust::system::detail::generic::min_element(exec, first, last, comp);
-} // end min_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-thrust::pair<ForwardIterator, ForwardIterator> minmax_element(execution_policy<DerivedPolicy> &exec,
-                                                              ForwardIterator first,
-                                                              ForwardIterator last,
-                                                              BinaryPredicate comp)
-{
-  // tbb prefers generic::minmax_element to cpp::minmax_element
-  return thrust::system::detail::generic::minmax_element(exec, first, last, comp);
-} // end minmax_element()
-
-} // end detail
-} // end tbb
-} // end system
-} // end thrust
-
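// [Editorial sketch, not part of the deleted header] All three overloads simply
// forward to the generic backend, so a TBB-policy call runs the same algorithm:
//     thrust::max_element(thrust::tbb::par, v.begin(), v.end(), thrust::less<int>());
// (thrust::tbb::par is assumed here as the usual execution policy for this system.)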
diff --git a/spaces/marioboy/neil-breen/encoder/data_objects/__init__.py b/spaces/marioboy/neil-breen/encoder/data_objects/__init__.py
deleted file mode 100644
index ef04ade68544d0477a7f6deb4e7d51e97f592910..0000000000000000000000000000000000000000
--- a/spaces/marioboy/neil-breen/encoder/data_objects/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
-from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
diff --git a/spaces/mattclifford1/IQM-VIS/utils.py b/spaces/mattclifford1/IQM-VIS/utils.py
deleted file mode 100644
index 72d39a792b21d7e865bf05a5fd228175551db6c4..0000000000000000000000000000000000000000
--- a/spaces/mattclifford1/IQM-VIS/utils.py
+++ /dev/null
@@ -1,124 +0,0 @@
-'''
-utils for the streamlit IQM-VIS app:
-get data/images and the transformations/metrics
-'''
-# Author: Matt Clifford
-import cv2
-import numpy as np
-import matplotlib.pyplot as plt
-import IQM_VIS
-import streamlit as st
-
-@st.cache
-def image_bytes_to_np(bytes_data, resize=None):
-    image = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), -1)
-    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    image = image.astype(np.float32) / 255.0
-    if resize is not None:
-        image = IQM_VIS.utils.resize_to_longest_side(image, resize)
-    return image
-
-def load_image(file):
-    return IQM_VIS.utils.load_image(file)
-
-def load_sample_image(im_num):
-    return load_image(f'images/waves{im_num}.jpeg')
-
-def transform_image(image, sliders, trans):
-    for key in sliders.keys():
-        image = trans[key]['function'](image, sliders[key])
-    return image
-
-def get_data_store(im1, im2):
-    metric = {'MAE': IQM_VIS.metrics.MAE,
-              'MSE': IQM_VIS.metrics.MSE,
-              '1-SSIM': IQM_VIS.metrics.ssim()}
-    metric_images = {'MSE': IQM_VIS.metrics.MSE_image,
-                     'SSIM': IQM_VIS.metrics.SSIM_image()}
-
-    # add numpy image and the metrics to the data handler
-    data_store = IQM_VIS.data_holder(im1,
-                                     im2,
-                                     metric,
-                                     metric_images)
-    return data_store
-
-def plot_metrics(metrics):
-    fig, axes = plt.subplots()
-    bar_plt = IQM_VIS.utils.bar_plotter(bar_names=[''],
-                                        var_names=list(metrics.keys()),
-                                        ax=axes)
-    bar_plt.plot('', list(metrics.values()))
-    bar_plt.set_style()
-    return fig
-
-def get_transformations():
-    trans = {
-        'rotation':{'min':-90, 'max':90, 'function':IQM_VIS.transforms.rotation, 'init_value':0},    # normal input
-        'blur':{'min':1, 'max':41, 'function':IQM_VIS.transforms.blur, 'init_value':1},    # only odd ints
-        'brightness':{'min':-0.4, 'max':0.4, 'function':IQM_VIS.transforms.brightness, 'init_value':0.0},    # normal but with float
-        'x_shift':{'min':-0.1, 'max':0.1, 'function':IQM_VIS.transforms.x_shift, 'init_value': 0.0},
-        'y_shift':{'min':-0.1, 'max':0.1, 'function':IQM_VIS.transforms.y_shift, 'init_value': 0.0},
-        'zoom':{'min':0.8, 'max':1.2, 'function':IQM_VIS.transforms.zoom_image, 'init_value': 1.0, 'num_values':21},    # requires non standard slider params
-        # 'threshold':{'min':-40, 'max':40, 'function':IQM_VIS.transforms.binary_threshold, 'init_value': 0},
-        'jpeg compression':{'min':1, 'max':100, 'function':IQM_VIS.transforms.jpeg_compression, 'init_value': 100},
-    }
-    return trans
-
-def get_metric_params():
-    params = {'sigma': {'min':0.25, 'max':5.25, 'init_value': 1.5},    # for the Gaussian kernel
-              # 'kernel_size': {'min':1, 'max':41, 'normalise':'odd', 'init_value': 11},    # ignored if Gaussian kernel used
-              'k1': {'min':0.01, 'max':0.21, 'init_value': 0.01},
-              'k2': {'min':0.01, 'max':0.21, 'init_value': 0.03}}
-    return params
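# [Editorial sketch, not part of the deleted file] How the helpers above fit
# together; the wiring is an assumption, the function names are not:
#     trans = get_transformations()
#     sliders = {k: v['init_value'] for k, v in trans.items()}
#     ref = load_sample_image(1)
#     data_store = get_data_store(ref, transform_image(ref, sliders, trans))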
-'''
-don't track CUDA objects when caching, as streamlit cannot yet do that
-'''
-# def cuda_cache(tensor):
-#     tensor = tensor.cpu().detach().numpy()
-#     return hash(str(tensor))
-
-'''
-metric averaging plots
-'''
-@st.experimental_singleton    # all vars starting with _ will be cached
-def get_metrics_avg_graphs(_data_store, _sliders_trans, _sliders_metric_params, _transformations, ref_im_name, transf_im_name, update_graphs):
-    results = IQM_VIS.utils.compute_metrics_over_range(_data_store, _transformations, _sliders_trans, _sliders_metric_params)
-    fig = plt.Figure()
-    axes = fig.add_subplot(111, polar=True)
-    metrics_names = list(_data_store.metrics.keys())
-    transformation_names = list(_transformations.keys())
-    _ = IQM_VIS.utils.get_radar_plots_avg(results, metrics_names, transformation_names, axes)
-    return fig
-
-'''
-metric range plots
-'''
-# @st.cache - cannot cache cuda data
-@st.experimental_singleton    # all vars starting with _ will be cached
-def get_metric_range_graphs(_data_store, trans_to_plot, _sliders_metric_params, _transformations, ref_im_name, transf_im_name, update_graphs):
-    metric_range_results = []
-    # use the initialised/default values for all sliders
-    init_trans_params = {}
-    for trans in _transformations.keys():
-        init_trans_params[trans] = _transformations[trans]['init_value']
-    results = IQM_VIS.utils.compute_metrics_over_range(_data_store, _transformations, init_trans_params, _sliders_metric_params)
-    # now get the plot
-    fig = plt.Figure()
-    axes = fig.add_subplot(111)
-    plotter = IQM_VIS.utils.get_transform_range_plots(results, trans_to_plot, axes)
-    plotter.set_style()
-    return fig
-
-'''
-UI utils
-'''
-def reset_all_sliders(dict_list, group_list):
-    for slider_config, group in zip(dict_list, group_list):
-        reset_slider_group(slider_config, group)
-
-def reset_slider_group(slider_config, group):
-    for key, item in slider_config.items():
-        if st.session_state[f"{group}_{key}"] != item['init_value']:    # don't change when already at the correct value, as setting init_value can throw a warning/exception
-            st.session_state[f"{group}_{key}"] = item['init_value']
diff --git a/spaces/matthoffner/open-codetree/graphql/definitions/comment.definition.ts b/spaces/matthoffner/open-codetree/graphql/definitions/comment.definition.ts
deleted file mode 100644
index cb4dd6cd7cab9d0e318dbfd4020202eb37aa2f08..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/open-codetree/graphql/definitions/comment.definition.ts
+++ /dev/null
@@ -1,97 +0,0 @@
-import { gql } from "@apollo/client";
-
-export const CommentsQuery = gql(`query Comments($input: CommentsInput!) {
-  comments(input: $input) {
-    data {
-      id
-      message
-      parentId
-      authorId
-      createdAt
-      updatedAt
-      author {
-        id
-        email
-        username
-        avatar
-      }
-    }
-    message
-    status
-  }
-}`);
-
-export const CreateCommentMutation =
-  gql(`mutation CreateComment($input: CreateCommentInput!) {
-  createComment(input: $input) {
-    data {
-      id
-      message
-      parentId
-      authorId
-      createdAt
-      updatedAt
-      author {
-        id
-        email
-        username
-        avatar
-      }
-    }
-    message
-    status
-  }
-}`);
-
-export const UpdateCommentMutation =
-  gql(`mutation UpdateComment($input: UpdateCommentInput!) {
-  updateComment(input: $input) {
-    data {
-      id
-      message
-      parentId
-      authorId
-      createdAt
-      updatedAt
-      author {
-        id
-        email
-        username
-        avatar
-      }
-    }
-    message
-    status
-  }
-}`);
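// [Editorial sketch, not part of the deleted file] These documents are written
// for Apollo's React hooks; inside a component one would use them roughly as
// (the variables shape is assumed from the mutation signature above):
//     const [createComment] = useMutation(CreateCommentMutation);
//     await createComment({ variables: { input: { message, parentId } } });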
-
-export const DeleteCommentMutation =
-  gql(`mutation DeleteComment($input: DeleteCommentInput!) {
-  deleteComment(input: $input) {
-    data {
-      id
-    }
-    message
-    status
-  }
-}`);
-
-export const CommentCreatedSubscription = gql(`subscription CommentCreated {
-  commentCreated {
-    type
-    message
-    data {
-      id
-      type
-      receiverId
-      projectId
-      content
-      viewed
-      createdAt
-      sender {
-        avatar
-        username
-      }
-    }
-  }
-}`);
diff --git a/spaces/matthoffner/starchat-ui/components/Settings/Import.tsx b/spaces/matthoffner/starchat-ui/components/Settings/Import.tsx
deleted file mode 100644
index 5cc9582f8322dc8584677eb9eb9801a6809f68b9..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/starchat-ui/components/Settings/Import.tsx
+++ /dev/null
@@ -1,51 +0,0 @@
-import { IconFileImport } from '@tabler/icons-react';
-import { FC } from 'react';
-
-import { useTranslation } from 'next-i18next';
-
-import { SupportedExportFormats } from '@/types/export';
-
-import { SidebarButton } from '../Sidebar/SidebarButton';
-
-interface Props {
-  onImport: (data: SupportedExportFormats) => void;
-}
-
-export const Import: FC<Props> = ({ onImport }) => {
-  const { t } = useTranslation('sidebar');
-  return (
-    <>
-      <input
-        id="import-file"
-        className="sr-only"
-        tabIndex={-1}
-        type="file"
-        accept=".json"
-        onChange={(e) => {
-          if (!e.target.files?.length) return;
-
-          const file = e.target.files[0];
-          const reader = new FileReader();
-          reader.onload = (e) => {
-            let json = JSON.parse(e.target?.result as string);
-            onImport(json);
-          };
-          reader.readAsText(file);
-        }}
-      />
-
-      <SidebarButton
-        text={t('Import data')}
-        icon={<IconFileImport size={18} />}
-        onClick={() => {
-          const importFile = document.querySelector(
-            '#import-file',
-          ) as HTMLInputElement;
-          if (importFile) {
-            importFile.click();
-          }
-        }}
-      />
-    </>
-  );
-};
diff --git a/spaces/matthoffner/starchat-ui/components/Sidebar/index.ts b/spaces/matthoffner/starchat-ui/components/Sidebar/index.ts
deleted file mode 100644
index e842a8591f87200d592241de7850dab18224f4c0..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/starchat-ui/components/Sidebar/index.ts
+++ /dev/null
@@ -1 +0,0 @@
-export { default } from './Sidebar';
diff --git a/spaces/megaaziib/RVC-V2-Huggingface-Version/run.sh b/spaces/megaaziib/RVC-V2-Huggingface-Version/run.sh
deleted file mode 100644
index 31d0be013006e9130e7b3b24d479272dd01c8acd..0000000000000000000000000000000000000000
--- a/spaces/megaaziib/RVC-V2-Huggingface-Version/run.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-# Install Debian packages
-sudo apt-get update
-sudo apt-get install -qq -y build-essential ffmpeg aria2
-
-# Upgrade pip and setuptools
-pip install --upgrade pip
-pip install --upgrade setuptools
-
-# Install wheel package (built-package format for Python)
-pip install wheel
-
-# Install Python packages using pip
-pip install -r requirements.txt
-
-# Run application locally at http://127.0.0.1:7860
-python app.py
diff --git a/spaces/merve/anonymization/public/measuring-fairness/init.js b/spaces/merve/anonymization/public/measuring-fairness/init.js
deleted file mode 100644
index 5a8df63793d90464eb148443787eb91e2b34180b..0000000000000000000000000000000000000000
--- a/spaces/merve/anonymization/public/measuring-fairness/init.js
+++ /dev/null
@@ -1,200 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -nCols = 12 - -window.colors = { - well: d3.color('#669399') + '', - sick: d3.color('#EE2A2A') + '', - - // well: d3.color('green') + '', - // sick: d3.color('purple'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#865327') + '', - // sick: d3.color('#012394'), - - // well: d3.color('#012394') + '', - // sick: d3.color('#FBC20F') + '', - - // well: d3.color('#012394') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#012394') + '', - - // well: d3.color('orange') + '', - // sick: d3.color('#012394') + '', - - -} - -window.colors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.2), -} - -window.lcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.35) -} -window.llcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(1) -} -window.dcolors = { - well: d3.interpolate(colors.well, '#000')(.65), - sick: d3.interpolate(colors.sick, '#000')(.65) -} - -// window.colors = { -// well: d3.color('#BEF5FF') + '', -// sick: d3.color('#FCC5C3') + '', -// } - -// window.colors = { -// well: d3.color('#669399') + '', -// sick: d3.color('#EE2A2A') + '', -// } - -// window.lcolors = { -// well: d3.interpolate(colors.well, '#fff')(.3), -// sick: d3.interpolate(colors.sick, '#fff')(.3) -// } -// window.llcolors = { -// well: d3.interpolate(colors.well, '#fff')(.2), -// sick: d3.interpolate(colors.sick, '#fff')(.2) -// } - -// window.lcolors = { -// well: '#CFFCF6', -// sick: '#FFBD96' -// } - -// copy(logColors()) -function logColors(){ - return ` - body{ - --colors-well: ${d3.rgb(colors.well)}; - --colors-sick: ${d3.rgb(colors.sick)}; - --lcolors-well: ${d3.rgb(lcolors.well)}; - --lcolors-sick: ${d3.rgb(lcolors.sick)}; - --dcolors-well: ${d3.rgb(dcolors.well)}; - --dcolors-sick: ${d3.rgb(dcolors.sick)}; - } - ` -} - - - -window.init = function(){ - console.clear() - - graphSel = d3.select('#graph').html('').append('div') - totalWidth = graphSel.node().offsetWidth - totalWidth = 400 - - c = d3.conventions({ - sel: graphSel.st({marginTop: 40}), - margin: {top: 20}, - totalWidth, - totalHeight: totalWidth, - }) - - students = makeStudents() - sel = makeSel() - mini = makeMini() - slider = makeSlider() - slides = makeSlides() - gs = makeGS() - - function sizeGraphSel(){ - var scale = (totalWidth + 35)/(innerWidth - 10) // off by one, s is 35 - scale = d3.clamp(1, scale, 2) - - graphSel.st({ - transform: `scale(${1/scale})`, - transformOrigin: '0px 0px', - - }) - } - sizeGraphSel() - d3.select(window).on('resize', sizeGraphSel) - -} -init() - - - - - -!(function(){ - var footnums = '¹²³' - - d3.selectAll('.footstart').each(function(d, i){ - d3.select(this) - .at({ - href: '#footend-' + i, - }) - .text(footnums[i]) - .parent().at({id: 'footstart-' + i}) - }) - - d3.selectAll('.footend').each(function(d, i){ - d3.select(this) - .at({ - href: '#footstart-' + i, - id: 'footend-' + i, - }) - 
-      .text(footnums[i])
-  })
-
-
-  d3.selectAll('#sections wee, #graph .weepeople').attr('aria-hidden', true)
-
-})()
-
diff --git a/spaces/merve/data-leak/public/third_party/weepeople.css b/spaces/merve/data-leak/public/third_party/weepeople.css
deleted file mode 100644
index 33ed7472967ade6cddc630b1a2ad62597c1cd2b2..0000000000000000000000000000000000000000
--- a/spaces/merve/data-leak/public/third_party/weepeople.css
+++ /dev/null
@@ -1,14 +0,0 @@
-/* https://github.com/propublica/weepeople This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License */
-
-@font-face {
-    font-family: 'WeePeople';
-    src: url(data:application/font-woff2;charset=utf-8;base64,[base64-encoded WOFF2 payload elided]) format('woff2'),
-        url(data:application/font-woff;charset=utf-8;base64,[base64-encoded WOFF payload elided; the source breaks off mid-stream here]
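/* [Editorial sketch, not part of the deleted file] The init.js above selects
   elements with the `weepeople` class; opting text into this face would look
   roughly like: .weepeople { font-family: 'WeePeople'; } (assumed usage). */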
28opF9jCccQSNwIhEu6Xj6EG+4brsXRhuHoNKHUewYFB/9amPXaAsTwWVuoIscrUxpiqDyFr7EzuHeAZYewqe/ai+0mfndSXoOlNi0CwdDls8U2Nm2DK4/msUp2btcsKQfFwZNY1GOPzquqFD5cA39jVxQtmzw1ili7E93ZcFlnGIPmkH+u1PlGD7rx74ffaPjdDLEFUcd2auN4/u65PwXpNEdSr2aUvcSH65/2safNZW3uG9YWny+u06gByelYNw3mG5QBohdM3oYRPKWfGZiByS5FAeUqbLmsW6Jmt1klKQj8YY2ZMW1VrEOZzeNfWLyzTRjq4vLRdv5p18Tx80tUgKitfg3rYHhcRAYlleg0hz5IOtMI9ck+eaPUvffFfmB9xf0z9svpLQFaYOWtdPnHQIWW852F7EYaJQgqp9Qe5ENAAaCGZaEk6PeaJEgqmNNRiIovDVJZnSJkI4l3CJqmhTSpQpnzku1TEur2aquoTg552/MiUydGmQ0v5LYoIDvuRAHJKFk25aEae9yl/TdHRdxHy9n5/qMwD2HhgH9SFVPSS0IrvuvBJq3Q4MH5z+0HNT9UtVVrXpyJbuk96qfeGqJawj6D+hjnupOMiTk/QD7h8uE8FTfEO8E7DzfVUP0gM14Ij31+EFBHcm5uaNNBXfcwypqQRzAmOaUlDszCVMe3DsNBMAGE9cUMskckiWK4DG4Gbn6k4W+5jRgMbGdl4BEeSg6Ra0XqR+YFAFQiZDPWcnbGFsNSRXLBwOXHaSPGdWIEISuigyyJX9/2sHW9XjFofupz8mCRXseyONcygVdRDAGBBtaTmxLHpvJD8GvFxBkah57kZbK0rzwSVZ2L+0g6d611R1sRJha8AkUBCjojOlQJ0PEGbsug4IoVD2nR340wys0HYNaAzI6OBozl2FmFj851fx8aXIJ8kbParal4eVd1yo87Ws+gR9xo/nC1rkwRRBQi5x4F2vsqCWRm7wrXC2c0tVmuBq9HB7jOdZ51pSnMLRrdsLILZwsyK3G9MGzfZql6WOhZY1HFj+uNc+3IcbFKPgkmXbhkWIPW+aqvWd6aaO9zvZ93QOJ85f2lLW9Xure5w+K6qdLG56SP1EHA1iujhddVVw5hnuu0Qkbs2L7NltixLRLB0AMDdzMRV+7rpO5Z1g8FvYj43nRuP6mObV7gRsQ956E6LMu4obMBZzo7aQre4E2bmciuyNHBdIx5vtPBU9Rjd3i3B+XJfxMBkCg8Gl/w0tus2ANNx50abU7HhVQ2i7pq8Hmc/AJzLjuqKNZmI+0QnVnEZx/Y7v+7KzBbForXVJbHr3/vir5ifBXb9AfVLT55czqd6vEjK97vt7uFA3KSKtUuJJkkx6QtBskOWill0L5xRhJhJFTnJC2p94e9u3zuJZm93h/CbRXy8cKXvUqXqjZKgmqmyK/1BCRV6EGm7mDor8Qha7Z3EsEmASITxd/uLu4e7R/nRQtLYxLMpUhX6BVzYexEZkfcBHoaL6ySRGIdDiu9RJsVP3CQgRhTBvD9PK8lMJ6mkpCWo0R9rPf8uaAPDUsg7Js3UxGUF8YewxYdNmLAUGLnd1cr8ZMTi3cxHgg2CHtCIxhVBdIpOhag6daQ1lX5cIBTVWglDKb+W8MZn/bE1xVHzBq8gmlb1ORaJcyGQ2zFolUVK/Z/LVEZROQGMggIGdlK8MqIS2Efqc5SPvtW+NqxgRSas4DGxRN3xkSttLYfhaitBB0ibp848FrRnZXIHepfeo2xvRY85U7ZJMiHWgbxIIYV77dsbyoQQYoluwTlhcFmWzbABEjLKHGwutNJTGJ+cuNa/zc9JKGG8ODESsKFYuELrrC6fGkSPxp635iv58ryfxSMmFXJQJn1jvxzDZo5o67NN3vnCYGFaMj/bN3l81hxf1NHg5SNdBmCB81VlVoB90ZP6c48Qu7DxtH++WbQ7WxKSCxj67DgrrstvHdsAHu2iHRHOBlt/3PezIStbswDB8q61phnjh44RrIN30brGHuqyDT11ii1T4Vm+I7YtSlyHwHWgQxNPbd7N+irrpBbPtIZiOpzQRO04vAapLQix2paAAj3gHgAkfIj9jMuhi66w5WftsxBK0qAK5wgHkIEesrDn8jgeNa+Xq7xcME5lxTUGh3h08WL28Rx3rvZlBWdg8tr87h9CaM9sf5mVOFyJKd998ff0P3H/gfqD1CneI1A5VYG29HYM14vr4WKQUj0oUQIFSRrdiAD7/pBon+jQiZ7EQpPFEjcG4UbphVPl04amGAuUKVnaJEiai1xpYMRa60ZdHLKBF9tUpbsy3xPKUaUkGurHvyRL7yZ2pA8p/Hkq1E3i7pQjZMcMAkufOmtEcX1QAwB/bveH4DRRovdqd5AiTOKj/cU29djciprpbhumwv19Sh0KiyPu4WLiMl2nRJ+eZJbEX/VfbbvuuF7pOcmIkAmXZH2xg8XA+lcIyIYm9LrVKvOVZXKCqlbP8jpICKyLGW1YSiCLCHCICVM80y6OhCgydnFBdH1eYjRCBP2SbbQv8vNTyl69pLVMMWPzBBNZVmRqtH9iDgJQiZcYf+mexwD0kZIfuIbsMFdxXJndZtYZd7QSgYFxrc+kkkfGZYWEGOmXQYSALTcUh2uR6imAFNNpVvV5lZdACssoEyfADorcZF2V1esCpyP0Dg7eRDPAer55tINVAr7yRSsRrhJZuSkWFmEJs26Sp8uw6zwHRWtHEI1jb2PDq4GtYPdaagG5ZfOO8Z06fVkddcugu23D4KPzsfVjnPWILr52Wjb908pnQL9xnp2GfW4ajziYzLLKdWzKcG6WIV9VS71kjGJMXaza+uIu+1IG8GIjBmjiJvpFHqpmvnBr15ZsC8GtNxHBWBwRPFnXdGU09Sa/furHzum1b9WyzaqqOMX1yN3saN7VwRQgqm44jXYWinxpqlD5ilV+p6IrfVczQT/u3gxvQouVUMzOroJqQhztRs/mICEErAqrIyt38ypbtCdFl2tqFKhULQIBLoPxNnFDjPhXv/hvzF90/4o6xQ8XjEL8lqxgkgON8n9KQIf0I+zthm6YeiC4X9IKGuOxktDlEJosUp+FRCFT5PFBTZ42peuZrVF30iQnxT1pjZinhMpktmA679R1kgnjhYfvJQ1hFFxh68jNNTMsMEl2fn23dw2oYhM7wg4u/HfBZJIW8/+Ha3aXYCv8+eaDAkakJj0RV3r9eYP7kenavJ77ggsEvrJ2iGBtASYcSe+kmwo2HEjuuehSE4MzOZePNu28OHrTH1XgjqxlsdS1KAosQUp8Gn3Vn5+wwgYXRuUabrWDxWs3xNYyA8/cjUI4A4/govdB+8EO0kzBejs1N0wlF7rkzp2tY2NEGajZdcHQAM7d9XCANh+Nx8rKFKAJkfIqKxwphJyBidRClyQKmfvq8922tX9PgyUrVVQ2K7ImdDr+5otPXn0yK85XbuhnKi9iLDNQWIpajK7VEkQoxM2K5UgcTKG6aPvi+xe/q+byuGl9cTTk2XO4cV8X9eJu/DA89/Pydv75Sdc3+lWzOaHz0mzI0AF2d+R
/cL74dv2c2Smju9JdvZoVn+jwzB4fdWV7ed32i+cMhcokL8pmLQVSUmQwpCMUKWa61zjSJHFg3Q7MBey8VK4iW3ODcnVJ6C3pXxmfUZugV1TPUVJu90ffZ9TXT7qn1NFp9We++Nv20v3b6pfUf8xM7QJeMFzTBqYvL9V2HMaNqhVcCQN0StL2UqoSN0kCC+eYEpGjNPwEKjrkl4vvbWQ6VLXvGLpPPktytXe3SVI7NZXKCmeOEz5QjFJYffKRbCgQlzxxXknobuibp3L5/WOO9k6idXpJMnkzuco7UclhdwNT0VcHdv+Yjd3ycKQb6z5x/wfZ1qGxQLO2P9X/afo4wXC7346NuhtE5TdQX0fScar2A3vDEueQr7xaFxIrXE9G/ZgdoVz6XgWqfnd329RwJkWxURR3lAk/3Mzxj20HBF/SCumogGNEXJH7uo3P29L5n0bozXsNYwBjLo2LLf0a1kouBW3q50zQi3xn3NnT0/6eRoH17UsfK6lVSxICXo52hP3MsFrZgBkYuFsWgjJqzfi+DGQ9MKMRy4fq0kegQR4LKc8LG5eSTi5dTvC9LJhHrlks48BWUvxpcSyekQVoL8JVvc4NuGrRLtQ6VqbCgdmTq03nWl0Fpk+5XYqA2a/lck+lMtDLwcL8gkIeWKmEIla9Xo2uqILtQ5apnrKcIxDj6jvfKL1NvAHHBTh7Oh8GX85ixPbgVv6ET7gjIpA6MN1sJbb3Bib2e2aDeWkKU1Ko0hyt+5f7by2Nyd5mq+VmaStcAhMj1TSigw7uyC+btdbrtsd1iWxHsGVfLPX5eb1VvgfJoJAxA09v7UDVgo77IW8i7lYwc+tHh0V22q/LijCcS6Ka1xPIlhe4UPCXCMvbFy6uLIKoronuJCLE+KwMA4KZUNl5YwqfWXebr31TmnMgo58rrCGsg6JsPjcvwkfdRVsUXB+gbfD1xy/Uy5hfZbo09SsXL+NM7pJeLtxTxFsLjaNuwLXsUMzB+rq8JYfoPAAS8ZzakKYwxrL+qlgN9sKHI90WJBpDrMHocFS2V5QjsB/lZwFnbcA10zMdjrq5dWPLkAPRxkI6sJLe93/94q+YP+3+uPrv8UMqJU+cm+IxycJNUfWQsObwZ3zsZ+efm0PY7feEEv76QspIUnzes11btLr71Lgd/MMtxa1sP7jdT5LfVH7hnuaTcPfQsJ2OSjBrkheLMIhlIQCL3qdEx93uIoERcWbqXn9IqQ4hD9skENQTs6hFkjxOlTAi3zyJdvapMP+ZYpFIuuWnRiOgnehq7ve7QwO9NDXdscSzP0wBEOGxbOb2jeA3kB9nTa0zX97t7x4S8u4udklsSExN4rz99wgHp1L/u+/RCEsbw/3U+yAFpH1qWOAp3ydKgxA55K42bWYEagzrRFpS5oj+Wh9XBejEqOebBiyx9GqO6LxWeqGrplhKeiKXIIKSHwkM2DwuFfMgVXkl2qBqDK2E8qkqykJTzZ5JkfFEZj0kgsDCZUV9tUKUrUn3GawwAcZeRTAOvjFmtW0amoIkb8E7yswmBRKjkTw5adMAt5q3vW1n0pPFtyZxgZGMKWlO5r0pwAvguBHZ40BKqtTY0aTTUTFNnIoIHiAbpJlKDgQvsc6DzZQN4Bb4NdN6rmLMG+deR5au3Mz1CKDtcTuvnQfaWsqo8U3Qtu/cL2p2WxTzsHJPKz2yyrVmsdvyJPFWoF8X2/bq1r0qfMDpZctq80P1V6MqpOXbe4b2ZWOPSwROzQj6hoMqLiute26lXYU44JjgQXIEL6dh7nDGdrWvXzY1whBYRMhaa066YVbbszjGxfykKn6LDdfu6tJvXrg5zgHYNSzds67NgPkuD50ZyuwMpx5cvXDBqptYX0Rg4h5+4qE5OyZqqKwyr2P1amsRoTRmmPWqzeqP2nWsHdNPfexCLNpuLPSiXJ9xdIGliMPHLNrn1y5ch7YVomWfOTVma1C4+WpYMKFTj/r85MXTm9K8bsrbl8zOzhaqanMQQw2XAgpdq67zLKR7xob9MS5TI64GER7eUdsZ1oPHMfLFjunibvt6v+59712/jvqF818aVgaUeKHBmTXWu45hGWNT+xZ4iwVvisLaUBeIc37gi79t/qj7GfVfC2sDylB0e5N6qyY1EqvRU3WcusFFuB7DDeW1O4pi8K7rQ/Jy0uEk7CSOGSqT1UuVykTD/FTdz0/1nDJdYiejqv1dDazaSfdS+vNODV5Uw6kuzk4CkQhe7LaSPr3YHXSLCYMF4mo1VZ/ksAc/FYMOcdc0IeQgLxRxccrNCtRc7CYOKTM/7qbu0NS8JeNKBLiSbHH/MNJPPCongfzv9AXIFvZ9vWDHP8guLtT2UcOVGkrfqaRn3G3FDSQ+Nima0zgRPRXO2fMhkslUPaKruNg/3NzupW/0g7q92D3cTerre/0aKIDFwqJNIRkPttGkLmy2IlPlTAkwK7M6ygwFq2/U0i9YaNcI4gkphWgvFAIKkc9o3QyGKTlOzXDSGWqqLG9Su0IhmYlUeTJpcAfxiEQsVY9zCgdB8hCbO9MDAWGIvsVXVp966TJwda3dBndp01DFDHRafHj1tHRnR2B4XvTU4H2inwEY0YHjUCr2FJoU1hDbAhCh96LNZLKTXUB5ZFXUSUGMEQuiURJKZl6w0YabjSRiLkx5WG8L7szz9azSMzukCSQwFDYGENgnIbiSMhSTo511P8WMNtVgpeiUSmqftapYiVLUPwWqbmDUTNOQwC4k5dmp1i/a5drM89cn6njB9MaRQCvbJNfRZDn4o2YHCrNWMwBG4TJRIyP0oyZKcapKYG+9YnQs9JDNu73vtJ2FnMk10zPJxKa3Ps7szLetnc/zV6/GO19i3ZaLobj67NS0lT3JsrxcnOcxXyM6zcqVX6dxKm1dHtc5gJz3jGvG69UGvM0tcM1drjKiS30c2nL1JTesfDZWiMDVs03BbIrfAObpgooX+Vi12byuq0hNNiJ4nAPWU7Bl630Dnq2LxpS+0i+1esb7zA4tHCMpvo/rszN37Mpo69bMM52ZUhfsqKVoqlzBXzhHRqlzaUGBX8b2camBlMzhfPTFf2d+zv1b6leot2YO5PZ+XMzvFkxrLIbrhzRmAyZ7ww6Cu4dBgCl1aR3rYR8Wo2RN2ao+TNUWbmCaabG4uX/gkI7F9jM1RX43bFadCsX3k9goNWxMc5P8Y11bXmWtfSqPH8glyV54bFnX/nuE3LdXJu1md7s7zOI4FLjvUhQ8tbAehjRNpPImJawGfxjVQXwiT709pGmF5n1vH5eMwLibEsIIdbe7lPmVLjHZ/RslvR+PGJawiyRwLx3uOyq7774nrbwXGejNB3UtJXimsNjdAUBnq9ctaBAzmDrNnDAuKQ1FKAkGJZoUSQTjP6dBHnTjzjbPs+1KOhlgmcyiRqrfjAPecK1kVMuJII6dn2HjOh/3AevTNIj7pB4FtOhoNV6nxkREvKYCR+oRxGYcbLSosqxpXo2CCaYgsjB0ghN1LkmJkv1JaatIE36k2JVSLkBQF6j+sDEPpWrLi6ZxvmZ/kDRtqspIZVWnD1
H5ByZbSGtYxv78CmdhX3phf6VZtf25/4iRK9VJTvJsotFjgtxZaUARQanwQWENuFx/wk5dtpLTwk45KQj7jSxfiQTQyXAkxUw5rI6x9sKusNMGhOQi+0H7CQVYir1PSsYzUTpjOJspAGNE/ydSFUTQdpbDXp/HpyE7FlF9rMrBgjQGnR2542zZRepPEaTWWfjGanHnh2/ub/rm+Q9dkLCd+GLTrhdlNcsvjUz1cSMYT5nPy/3586Xe5I3wb6C1r56NT4u7bK6H9VEo8+Iqiy3nG5gKnLFbHi/O3G0Y6xyBLHBr7Cs4yHnRjixVwrXkdm7sEKptsy82IQyAfNParJoN7fOrAU4HDqvVl6sceOr39bvTly2IWVUo9Tb+M7MxNy12hCBhEVsVbcteUyODqFgFZG6SjFmUZN4md+ymUSxYHNSO/0df/DXzy+5PPvlrT/6hKoBRJylgAymqpz6sEx0kkJpLbwUVjBTqgCCINJuia1KJVJ2myTHaTLPSmOBKRSzJuKVWSlj1laad8nsaurSUi9XbhykxfYi6rm9TkBqY3nqvT5SgyjGYyEFYDnpHaHKLK+kM5RFMyDOVk9OEoRN9+djZ+oaglEBukNkZ3Kf5M1w3Mx295GURojRlVrM2lHeiMjEBZEFCLMT1far+EhAA+7X3RQrgDKM63IDT+aKpxgrGyloP9SRZDddjOxm6sNSdo5H6tQkvbHbml70bpChDI2TbE8jys8+fM6fg6MitqLLAPI43G2v33Xf+HXYnwj4iewB1KtvU1kpGOSvUWwv37EKhaa1gH9LUxHE6dKGIkv5T83G3WCIYUUImKrz31DUv2stv/sziNz2s4y/kW5ddlnaZI7xR7bpb5c4XAKQsmNs3xacP/3z3Q9kL3fzu8+6D/+Zy9eNVvqgR1ncvXbfdrXYz3/mjV2Z7stuWzXG/q5bvvi9/t365YvfqUaca386qz+tXofjRcs2uN3ar0Gbsyj7/lv4txdnrrT+53nfNrij7u1lcEoAJkHGsmp/+6JeArDPbttly6T4aZSAQ29uiHSObQgLYQh6/v4GHbyn0YJOWrcpyNl+ZRZ5VuBsgT4s0M+tfw/r/pvtj6o/QP0sp8/76get0vx39NmzvLqT5eYvlDn69l6zxFsuZoUcA4d7tL8JW0ruHIudUmT1Vw8V2sqVDF+P3iBtkChaTrEnpIFqy78YTAwW4YkRDas8YRHPmp5k0mvOtmAe5TenhqaX6g3QW8kMpWc10i3Rx3VxPE2d21IowrrljS8fUS5WaMhKX39++P0ynuN+9lAFf93d75nInKdvtoUybhlZohhe7g7TljU790tuUz2HTiAQ29Ln3ODyZ/vdwnThCmpG1078gzQKJ3QaZTyWSUyv+IpUfyfLFqflG9IYGVB6eUdFm2GVXGx1KVwFAwac9ZWG6Jm8Uel2oUFADxMwBowmRk9nU9yy1VJJxNmIejXPg7CpJWAGclu3S+UinBz+XsYfA+Nz3xp+a8bI9Bk2ts4+0lGV9ACUWRQgtcuq+4FQrXYg/FlUY/ELQkm5mjojd0rIv+r1I7WxlOfeITdYytIoyJeVKqkPICyQLKcjNgxGxr/uWDJlgZSjN3VHJ4Ypuk7xARtRIEczpDJex0VVz7FvTEW6KmY0bwyYS39lsZtr9WXYxBlcZ1bQO7k1nu/DUSqAVHGACQUd1/kn+rLtx8aNZ897F+/J5bdscKNPhA7FAKFIb12RVswAoDvGjyBFcx76cjW3+sj3dLm5VgVtT90/Hk+cxvChyRApAF1/a0JRxgchHMqm21DN8C660M82yaeB+T118UxfB1fvheFszMowqGtyE+EYayPxocX/JcIzLAN96Pndj7Y7sYM1VFkovvfQu8XvNyCpy/IwtakvzZBiQslKM5PrxiX7yg1/8L+b/cf+6eq2+9OTJ5YlK6cRaTa1KKUV5FWQ+ZvAfDamMI9afCDUQoFGSMPXsvpoyibvDFDsxm4eko6ctbMPEm5mYxJftMTGEdvpOpRmcnKDy4bFMupDYQQbhUZiVKrdp9gpI8vuYDDn1NxPZbul0EwMGbEgCYkIbL6M/U9pZCwb5CbXSsCv/3SYufi/tmfJRM09ul8HE3PyDInNlddy9bDgeqVTVWXv01R7LPAhP8/NOIkb6tYG5ScRhq/uzuoYDzIIIu09OfVFLY0aAI1NP6wjLsF0cFmsLbG/Lxj/vTl8VJSm1L8pYyoCVwGSerxHFh8JVsSjzEYYgelJdb2b+qSSqQhkd1ehPZYZjaPRgitSuQxlHG9uhPD152qwM43xd+XaxGT+uw619v3qawcTKwhZRkmCp6JLpImtlCBlHL2T35afh+O+J8r071/f/Y7jM1uv8xXYEe94cZctzW6y7zcsgQnmwDVd8dgfrL9QVp630fyiue3fOju6ji9JTaAvuCcIKFjdnAmEGali7oqjnPvzxsyL7A+3vWo7L4vOrdx8Q6A7lKq58HmWODStfmfB5RRE2y11BsfblQoAPdrHHheKo0nrmGst+QsQpTT1s2s/LOakCrgsuqrO1W3L6KQ64Kv1p+dQUQznEi1l3DeYMm3VxPouX9nl3Frql+4XjePVCm1mWtS1vOYCyGvpPNktTVK5tfLWL3+9a/S+YNmt+CvehcubPsm7bfvG3zG9zf0idqZfTlIHF43C1C1JHLvr72xs4k7vbFLTu0sSA22msmUgZZOYtdQVbP2mO0jiSUYqPSew01Uw8a7usEdySXnJkmgwHeFQ1PRpiqgVPK/8wSngnQeteKq5iVQ+HUQNSb30vis9bCUUvHvtd6FcP2TZWOGScpcwVWJxYVlr9NDxK9jaNmxJn7xb371QKz823QsWkbsEwJ9c1ZTCiaWM7IC3K5krqB8HC90Qv4B+Z7JDRE3SCBUMC9t9lueKoENhXy1aRo7I/eR4yVUnQGVkeBYyqmTaFdSBglwW8rClLmOlAPxqwfRPmR8OJIRYrsOM85DC3VoVKS6GUuSEez3DjLgJnYdiZsc2lOqnn31wzXjZZVTZfrvI2pvQdnaQXNSD+u7gcOzPHKVWA6u980tkmB2D8xvm3+y9Z01jfKZx+aizWLNd2pOm+YKqrNzD1zGbKFmPI3rZv2RTCfkHwZLJxVvW+Hstf0Fez5kear/iTpn56bPb74eTEl/OQL2zxyepKf8hmLXsy9CI7Kta+e7bTR2ER/E1WZFr/9vnHsXftLPbFR+cfxT47qi9dOUekp1Vf5m/DPDOL4JbudfXiI5e/eKXbF7E7qYdvF/nlSpoL34YP+ekct9FeDrm7MPnqS+pFPa+KwDAQl/CoPWpmPvq6AvLhloNMZAVnNALuQr/o92FjIzCxOprHja8yzv9UooX4ddjU73SfqpfqLaM6LcNykrT/0JMmTQq3yU6SHjmJHThdkFaW5pvht+Zi/091jkmfBA0w5cX3nIWYxv0sxo0KqSuDMqCpZeKFkX6z9weJj3w0zeOYUusilXivLg7T0Chl2PvvDga4WAwX47Uk7LejTwrmaVQHh13LqMPHqdg34lrvHguG0gjxAL8c2I43yPkyRUUt5
PhoZgrMfT+Yz01b4ts8L3QWZD4zxxO6wpWgfyyMaWGdacEivgra1d5R6FdyEOmZYmJw5q20LhraITOzMqjHk43KqBv23npznOlI3ZCDwfiaLFCq7ZTXsSmKs6pp0SbJAqVDC3SqYkeFprjfirgwU8WClFFUwKoJPr+IOSzIreLHMWr4Ewa1dH/Gvur2O64dOSiZZOU0+8KWwR1n5eY7/xlg4v+ur7ZMlOjL/vZk+HoZ+iws4smCTXDpQDj45jQ7CZ/Xp1nz+kN5XzcZKLW/yMJxJSxY9wDYszqrXNnavD3Jqsv4pcF9pWw+Vad6FhEa1tuzqnMv7Y9kYXbZUZJl7XxZH/uZDECbr/1lQLxaVH72A33zKovHVbbMnn9l8W2Pq0VTiAiVTd4YOJ45wsf1N55mZ5vGbRhkwsfmzIotYosrAoL3XJ2ev6iPvr/pXf5yXb3OhlUx13lbuAFhQgMGWupyWQ1Lo16Ooy5xxzjxWana50E3iMKFxPvs/f/3v3vbp77QPwgb+2H3R9SfYuy4SLFXGCZ9zHYPI3i4Yfv3lLWdT4V2rMCLqTFpymIckrKpbhRSPrbWUzQnEaJ/7NOR8Tvi32RExXv9gaHUfZpkwQ6iadT8lEJlviPlfg89UgO7/9MY5mkEgIwXlR8OI3UO3UlCVm/TrNL7aeqnzHu/u1KpCv6A8HDqYno02VEkwJzzd7e7YBS9SULEU4XdXI/smQ0v1faNepyJz8al3cNj9el2MudpxD2Y6oPI/6/vP0ydn7xcknCaxgjd69c3C6ntyPhMtrHrvNJzswnP8rWUhHNyNvw6U52MfzOuYpenpHdwdwG5ATyIszxzx3Z0mCnsMWa899FUs1g9lD22BCdZSgzHD1lOjMtE7F+buZUjkBxq6oUEbLB4LLIhk1fPji/Ot0Ftynwh/UGcXMpZGoUcAoK+lu1JFAazRMFwDxGnlhkynH7BA5E0nryd+2RfC90v2Db7mWQ0gLwjOl1y5pdlzpTsQi2o24e3bAI/pHJpRGU2mzATCoMIzv2ivvDrUdmmBf4EdgWCBeA/vcpVYbb5Vb5t4qfabViX0m0T7ZvqWr8JX6uLnC1VhUIszhSan7ddDJvsNmYhN80cVnac9R+aNjs1oUWQaDOzwkUdI9xa6OGQi1z3liJOE6l4sHEWzvrjFci7XerW6LH4LIayb2xDJSDuUmQ7+lKrbX3y1M10NuQeJATIHILLVlQqhSMflgRT6dQQFZd3Oi99v32r/FsCsjOlLf+5+mKZ9cdly3bPInvm1u51h9fyLqz8qIpQ4DpKHkIKa0GU3dIeUnpd9bi7ma5Aal1BLzB+Mn9TggHluIcrby98c1K8jjkO2Z+yKIrbuKwpGfVLdop7G3K9sOGJVd/64r81f979s+pj9TvVv6j+NFDl6jBH00wtOtcP9+PwcL24T8JYhm338yQd/G4zzvxxGKCXGRR+vjjUpKeazpB6x6VZnUmoJP1N0JUmnPvhVIYkp0r6IKw3qYEfU0AiEWZi55AwOny9w0Lj5kLS+3ADstP5QfMociOJMhkaH7rR/6lU1F0SLiaaPb2UuqqkKD8dwGGU2CVblGQQhzySgjOLiBnTNLGrJMAUEGIKDxeDs1BwHNeL6xNzTX3xtVxPyWCn8jyOWP00mJfdrGY/FjmWXJsWVNk0WHrr6DOYPrw33WjOxhBOzoxxc7TDCjUdoqDQ9L4AgJj8kuobK4DgqiN4Oqn6MqvLpEPMOJFdHH7JUvMijhW7Ies3jRvbbFRplGjImbqixZs0sJxrukg94iGLwbCeU7PbRRJk7CLXRjJMSyBMN4s1LJ6if8s2G+k4MHnBbnZHtp0535mMw04pmAyDFXfPQQyB3emrPO+M+j5sNJKpcIJIafqcRTGQ/iXYams2PpNuJgCe/mPsOv8WLh/njYB+aA841cxtRybLomkReP5hdzFyhM9tZUU0kMlURBbcwYoIPxLbp67NWjd+06x72+uujCsO6QmIbktd57+z/9LZs3VfR478Ypqt4QQKG+Z6cVzUL7y/Lt5QObrYOH/sh6yFkeeBfUijzwbFESQmIDKYcYyZJrPQvm7q+LUqtJnJ89JxHj07MLDddSgBBrbuZq1/la1B0X3jhko1zQhKhMXGLq4M2IvTdW+96Tppg4WPUK7NTX7aB1+6hSqr/nz1URiwpvSgLsJZVn1SPnPPR98Yn81C8ROLHTCl1Pl+PArnJpyG7BSsJeZJ0arlvyKceT3vFSd1IJoCDVy5zsU/xTCH9bDuD8sUUjqEHnF+X8Vlp1K7yaI4IYc5/eKPmtH9fnWBH94r9g+cKiaT5qIdlOG+U8b5u4XhIZDOeOaz4cXxWpLhTXMmOBj0IJRmdSZNLj+RqV+NSlWlx4gY7OSlAlMgAZBpeqKpu51SVmLbnAm8ffPY4UOq85AGfj7Wig9JNeqkr++kzH6qUm2dObsg0/mSfCZp+tgrfcEpQ49J7TRR4krfpWYheSDQB3V9xwQdFS6n5azOQDll3ci4mjyLIkbOB3eeD8CAIJlb6eDx5XPGmTPJafmo6uhmlPRSrRdkBI1p6agjE9najCsASHjTjfN56xt9KQ+y6ZZnu8VSm+1sObe6s0XNIohWLTX4fuYoqhI+48va5aMr92HgABmsTrhTySEzd5tltIkiiLrP67is5prjaF1tqlxIjB3ZtisPy0FM3lppdZ4mg8IiS9+538uO7Gia1zn79DrjwilbeDRH4HT16rx/dm+3nX+h8/woy39/9mXNsvVTZ+vB+CKobFvvVDh2bWkr2/OZP/PZ19Zh4VZH2d5kwwl8IGJqxAFd02K7z/nQHhXnczCYTHtX4Ipw5T/VX573P+9eXL1Yf6zLLG8WxcKt1+GEmWZF2fDLcgmLMCBtzPCKPIi9AiYDq2Pc7PqYLw2vYNsuxty8iC0byWVOc5GLrcj0DucHWwGdOSqCoCSPwdBDyDa+o4b3L3/xN8zfdX9Q/XWpG323yz+1zT8+KQT/T08CGA72wJG913eToPVmIR4Vy1U856mac/avUPSFdCOEFBLU6vE5KUm4q2V6905Ks5It82lAwhjSZIJapcFfUkaaOP7jeHLp9E1NesMho7WXglAa9Ds9l+CxaXk3TSSnP/ePubHvfeZBeuTB1WPtNs3wur+ZMt4SmtxOArbkzdMgXpO6hw5kRQS5W38hrQvYO0KIh93FlcG2LnhgdNOcSDiJ46Y+ZIQF05jxNLfzvbqZhnVJbvz+Lg1ekGYKpuTeqS2ORP+bOiv45CaYbTlqTlt1fFZFnfdJypOxjc/Cw7hw6demKagggP3JDFsOvZxx1geIK4vrohehcEFPAAHLa1zlunnHIQOmhOu3k2hMOlvBI5242A7g7VmAIhPn5zhrRWZRKW8Xue1nG6caokXhpVsR9gfu3Yn6TeRyLA65UIvSn3NeOJZcwYcgsG3OsPdxOzi2HFWF5XAlSRQE0FpSWY6p5jNbCo5JkcdEifrFUUWBHdU//GN9FsvSHvFZW5x1bAUyZOCf7tQATqLmA2d81aou66rI6ll2VZ4gxKnMKrS6pR4G
ny6d9DlK7w5TcSbCzbqF932dUc2CoCpT4DhwW8Hlp7MluyRA9r20bgSmwxRYwsKXZvayjRWFDW1EeBE0QhNQH7ZrhMCni+A6yJiKMsYZfLJtZ571sqCPGrcIs6qqQwv/iOhFMjO46Yv4mVN3vutW5epUk8A9y+y4qDZA8Coreu2yyJnw+RjjMttdyoiIeQ7e7+vwangGkMj1Z4P28wJkqLbtcXV3GkYQ/LefBnsW6ItxwC1IjT+Ow6J35iTWY/upXbl6mLlFsc5myyyv8rq3cOngHJQUU1VIIvPqdd/0rhEHAmZ0aW/10W9uXtUNO1o4y6o7f97/vsqaE9txSLw0WTHhU1OEDqzXMrdZHpvldKPrGFatCVi5Ja6Czvt4nX9i1zhKJRjJexNNziTTAnyMc7p6mysD3PvHX/yX5h+4v8BZ5f1wUOdKRYhANEUhJylyJ/6J+DbJ/5NENgyHRAZw6GEy1bvb9OAlceRJnv8gSl7NKQNDgof5oWf5MNrpPtWe0pP7OCZwd3gAH3mDzNtF4ESOkbz+B/Ve3+4uAj8l6c2GYYY0FPJ7VhdYrktYNIi6uFEHVW7YAohSR6R0O9zcX0tPI89UcGYn88OlFCZDf1Owc3iQxDTHQCYohPkN0OlxhoI0QWzD4yO59lt6BHIgCt52F9KJraf64Y7V9Ot7zg4jMRJwlkPeX9wyaBn+Lw7clDpyYWRUfWYo6QQrXEW/NHXNOjR5v86DPBGOIi/OMt9Uca70zIzSNVDInHBNObmC/1Wg+k3wfUb7csVlSP2UsAqZ8UcL5WwUeHfWsbG8/djOzY0dRSRb+O7ILWe4tgwF2IcJ0Ko4nlg6AThyOpPpDPOsCnxeCZ/TpO2Qbylw0hx+FzitpeUI/UMa0omYhW0Dx/7yuG6F4sgYdB0CswP4EJ+CYGWQodVNPOLk1HXZh4otFwWzr+qgIsZbOD204TVLAyLOc9kPl79U39kWYf9yHG2R2dU690v2F0cQMZCuuuuZ9z31RTG8sMV6dvqLw0dl3pgimh8bFn8pZF++rMJVW81bTt9eY/ejVWA353rz7GG9e/biql2Ubz7/Uj5chLfPXXhbzn6qD9+O8UVR9SrH6XBywgxgF4vl5uobq93X69F+NdrXz1xYHlFq96OL+ms/oj9Wvo5H7uv1C/YVm+P+bAXWpWtVZDZ1X8qg98pXvciUjxUilQLEpvCwdb9x7iQ8hQfKz+HLXl0+HWIa1ahz6nnDEVskjGgLSsdBpjNftQEMJ9frdQ7iJcNcY3oEGIfYRj3N/WKbie5N6XMZHWT1l36wOJdHfyVpGvMpqtKLgSN3ADZBngEW4WuLgurCKM/pQMQX4BaTduebX/wt/Tfcter53IRDB+YbtZhm+cJ2t2kCMAGE0zXTIwPAAUCQtgfJ/piap0VoM2cKhOTrVA3y8IVUAlkk1epIKZxP4h5GMoJForu5v5MhxSwHpubl2w9qwrn0HBgZUDXNM5EMcKo5PHCWQrLi9KCm3eNDQZOg7TD8gCHLB5Xw7PBemWz6sE/fah6I/iOKg6+J/bMq2rMiLuC2huNsTzWMNNWmZ5QhRmgpc1AVohje7T7KiEXbMGfAJ8xJYpC3qeC8YMrY4PDL1sQ0+c01+XzOEXjy6B6Sa5YrvMzaYQqBYld5loY/9dXTmnpQ+J611KY5x9FIAZ16V1YBqRPnLADELTrIA1RkyeBI4O96yzI+wuqf5SMQYzm6RczKurSfVCTqpig/Wbi+aILrQys9QpTtUKSrPSL6gAW0yZ9lqxO961dGLxdP23eRAtcXbsU8hjzf8ww8K3MXKqNmgLMX9s31+99iuyWOqa3BTMx8A5OoTf4DNv8sZK/1Sbluu/K4nge1OvYLO4ydrzjS1Lg6e3tx4uoFOEFZFvGo6IfCFSXBfWWqE1eC1dz6vjxmGwynIhhTqUrVdbY9ucrO+Cw/yRdRyfb0yNu2018eFi/jpb7wz+szhi+xyVZ8Pu6Tv//Fb5jf476m/mX176u/yur542Mx6BXMnA/ETU/Inebyp+bmQaYDD9O43uupzPb4+A5h9NsrfSvl7e+Z/jEJtaWNZRri+45dyGk53qYU4PDd+cIp+J/7x0k/HESyS/P2D9MHGLanOgA+PT21ceq5ud9x5setPNTj9lAx5G/DdsogpF0e6h2Hyf23nDtwc39o3bu5lloCh4I8dvpNjzW7kyefyMC31MUsHYopdhJ8OFU3J/ZahiJJ7+Jj++U1S/NkPjuphcgTSfwuVWbAFVLMtMX2Lu9TTHWR5ngmVe314TBwfc1PZ3xyjDHrbCi+FaxE6qsiuiBxBJtnJO9HYTWHsvJRHjrjqAO23pn5SSw9R3LDFRdORpvK8BG2zrOeTlk1aHvmpf+34FNslZfpvRxpw7ADNoywGOyPpve8GJwku71M9jW1jzIqWJ5iQitPk0q0KEptejQbE402lSgzKrzb6pubj+nkDdVrJs+zvjHrvJBODnCAogPSqKVppDOMyTUZkY4T1kVVfiVb3Qx8qi1chtQ7alVVNnJEdt0wxwKQaOB6tgE7zvrvjIQ5rWdKCiqRz01jFJDJWGPq3qL6m5IQqXXHooqecyi+t7n7VHJjuAy/Ui4zPhSucTk4gA5l1uhF+9nS4jNZ6cs5blCdD6v7JQDOleDdnrp3ePxuyHZNs9J15vPZvnm64nyoggLcWQPm4mufs0iMmEYP3hbl0h+vtWiMSzizKlbdR3zCLUKWM/1p7c+q4mlRXpY33jTOPm/u7Lyt+mw5X7vzogHhiJdhsLYL2RJ3JfBhJu7Z7CvPjtOzd0PEwrALV0c6VlMXHOfMUcIA8pA/hYO1iB4HAKU+9pmLTVsB8U5dXxld4+bIU1bKRsOXM4wiO5JZ0LnUVaT0qxvOFM05qiJ/1pP3lKt3lsKQnkOzomVrVg46wSmdLjVoqbxxs0Xtiv5jlsqwcr8S5FlU0swFBkXnL5JFmWQt5IEJclI3+IRuw6l/01MB06yH3/7FXzf/m/ttetBfJ+pNOp1DU94o3tUfnikwvT7IjLH0XKQxiRSGRJ7nafJa+v5YXR8aXgQj0lM/pmeq3YGQ7+VBt1Oa8vCocD6IYBqpwpcnKY8IfyUxKvtLcjsZpsvEC+IEyfRMT2LaifJox2fFPlxPUc7tBcur6dEjUw/id59PcnfQ/kqVlAj2OC3pcbzk3fTchi01ijsvHdkCTi/VpMr4nt6ZqRlxcQ3Uu2em5VhdsCn9UVE/dVwLjeInLB+Pci9PiXivDuh6u99OD45LT1dQOIrbq8OcORzmTYLahYzYGFP6aDHcXB9r+IJjvd3fbo/pFqZO8bvADYXD9Di8sB8RxO1TUYtZ3qkCBsb1QEn2rciuZGLmdhJgq5QeS8+0kBwVnIX5+yy4tOfuNuRLLsEwaYJFP1spzo1RhaxgyZmTt+gjwotPomQGyuZpXjsO8GFqwwR51Crl+hnHkbdWckU2yXBl/IK0u1zoc89Cspdx1vJoBqnDcPKcjH0Byi6OQhefrl44P1xGNoKAZVjgJraS8WF4QBc+XWUONseONm6i5gdl1Ld
NbUzcsDwrijWqivVqzmXDsVa2mR8zBMz5+Ml5e3phR83haYNtOLDXc0QOcYlZF1dyGjAiKMpW4jKG1obSSnujyeGSsk16OpOdEfGjUrlPaluJBmTSsDP1GPKw9bXtVOHllBlgWhEtHx7XK+UjSQ/JJbZ2PdZtPivNUTzVJ8YOX+PbOGWCOSjLyb+kkhlbDUW6a/xsXJqKI7qi+bMRF0yVw7nfx4wdpkZvJEz1+XLb+qwMG38O15PH6oz9BQizWM8DZj/b2E2RFZ3v5Sk4pXOxA5iW7bDY40ZhCbpt1oXQ2x5XwrMxxuYzH/PjD1SimnwJzC+XQBr2aLCXaslnRdA9mjf6fhmrUiMkD8z3sdEymj7zq7pcDKprCyzBE3/E3qtFU1SUKzs/9ktZcJv82JdZ/P/b+7Yfy7Lzrlp73de+nr33udW5VNU5XefUpburuy6nemb6Mh6Px2N7PBPG41HiKHZigp2LYhGcECELghQbFFkRIEcGv/CAEE/hNQIJ4RcShYcQIjkiCFCEAggkECBekC1P8/2+tU/18AfwgkrT3dOXU1Wn9t7ru/4u+kDoA2/3s1BYO/N2lA0m5oHI8lHmPVAF4MwWaF0rV9pyTM2cqvG0Fn73HiRSWwgWJvHaw1VcM3uFcoW+WwzOTF/WqlKZ5bUkJQpJH9OxT+nFbNmcc7S2izwHEw3PgmIGKSMRITdGNTNLPKfY+pU2LZNC2gyofpQgeDpa9v2x4HT1vPXO9CRgwlnBJtC4YTDXQdkB4zHDlAAJ9aQSdXtRhtQagIDLinJXqYf1Sq2sHkyRU0KPVQccrwKTXe0mIGQ5duYrZprqB8wEUUOLLz7/V/JKf0t8VvwEY3k4NK95gsPqNRjOrM7Ein3Tt4ZUDJmJmy2KSSdiySJ9sajmDRdr/UaAC4c9DMejCye7EBdJVxszxrqTEe4qZcy8ltG2pssCW6/Pyy6uXnfRdWu3+SwG881Vx36iDgCN5sBCFIQqdEvJZ9DnfdhgaLcQGozE1tHcZlsXb/2tNsyhZIT7stPow9cfbgYcko3t1DktpIGQt6K4/Y1p8lacpBCABZViyD1rdPDB5zVbsTHGREWu2J64xpeRp3Fbriy2Y4oiEfMUMXiwGDN5zU0mxRSfYsGWUIHK0Aw6wuV4kUObhx5dKjpgnYMJD5gWgX0w2QuT3TNYdaOCoYXgoH2Y9mfZauqzVE/NktUuFNPXE5gqMWcdAxXmXwQKGbLPT2Ue/Y1h6QcdIGzjTEBQx35e4A2GkIiSmkJh6bjn4WwBzF8eGJJHv1bGesBTqb5ahTlqQSrhqbjWUAFMmSDC2oRUho2cH8tyHzNKatqvmetuM3xHDjWyxbgLvI3oPoHaGyeLv20zq6k7t0WVQT1ZRaoITrHdNesDVd6hnvTIuEN39+hBlb4Ea20FsT4KqPfaWiZja0vZ9loZMn+afm74uMrLpspH2r4aZlSGJ7I3VHao29zsp5dSUXP8YJWZ0h/DwDepzK5whUr7njLUQE7HBgRagx6CgvCmOhEn9fBg98HjI+enxcNBtgsgAqWheVbOCnPP21lIMScqM7MHOKzymAdBZAAEN0UpqMSFc/IYg6rRxIUnvf4d1X9YXlTrpuppXVfYc6QJEwd0SaEuG+eDnqrOFm0fwwIqxxNw/R1nPpkwJzYKJiEhq8+/n79s93f7v+fyOlN981WdlqZ+6O4qM6U8pcbuIDSDMDT2Dt1wqyv6mm40AsAioGZV4pPPf09+R/+q+KT4jPia+K0bRz0bcXOYGg/bLVommn53Gjnbxr1lFU3Wv+szNbOMQyoMlxZnnefm/WiBt0JMYFg611zryMyMLsJt52cuwc+eR5R8xP/0je16VC6SnokYy24ASPATZuR9t6LvFNOfim19TBVksoo7/G5NR5Hk6vpyeRWdj7fwwrihi4IYXSP/1Aw4vkRf0xd66xebbnfacdkBqtTX9xOqnofR1muzvASmN+4XTMdSpdb8nF4cl4OHGzg/LLZlKRr0q9UpPthSbdxZhQ0RIS+iMaj8Fp3hAqPw4+wOJdXgOt6sjSoVgA9iYQUofY7tOBO68AumORQNKH06u4FYlIDQjLRUASQ2T1M6/u7YDRkFBE8W64AmZpaVpGJB00EdF7kPfrr3Cr2iyEeFt6bnox4QPZrO8qHG8BsQe/Y4p7dD7TKDf1hgnf6lUTAApYh07iyLnYpKiJkw+7oeNoDzxGzad5izag/Zh/t+l8rQMsEUjN6YLbImEWtp+hRHUolUqz54g3XNBXVemC+gLqViNMkFMF9hqGShge9JWXntZ0EvkApze5HBnxNqHBoOfzpAMbFOp77x8p/k1LNSZZi83G4yjBsodxtIwoFM1zPwJ0DJGtTETNrhVbErB/dX5ZkyoK/QyywXC7nMof9x6PZClo5e8bodyqGiRrYx49p+LOtXg/pReFMtvzh+2aZVgoG2TJXo9Xd3fT6lu7CrgjYut6JX6xnde0qm00ROMrrQZjiRYVYy4QKWmgmLiyR1In1mVufFmkv/QlJF3c6C+ejLITsyWdBBFYmuZOlUIRojqXQD2cJ9pi2PTFIqPZDmKDH3shmeq/GsqJaJ+RguJhyDKQal9WbXnIt5TpEeyxKrC52ZO4An1EL5FOAkI/+n4x2Her342ewuUgKcAzHeoXtpfXa39WPnXQ756uSXogpmoHthWAw74awmx6PiZOFHVAzLs+ywyjlm7fzu838q/7b+szt/tPOfRU+ccMxCERS1w7pJ14OIqk/mengDU+Ao1bVl9He8F1tEaQgWuOmAAcwgvezKEXG1iuggjg9gu7FSKMctiO8sbsQWtwxxBhCsbZxHUpBaPZWY9W0GW5nC9QA/PyRTPheHl+vtyo9XhPTyhmXGEK3MVsUHYRGf4DKWR7qJ4CO82TMImbF244Cp46ptoNtPNcD+hEoDkydwzMtkpIQb41WWo41KykTvu4mEmsJRanepTrjcb+7Nshqk/XCdpYnVMxjHCl/bQSb0hrk9dKo9+4Oy8oHPNdYrrXWpdxaO5VqnDVX9oq5PqsNxeIkqCszSVGMLgx4WUseUPG2i87S5KvI7VhWUnIxbTrRzRa9N94Nd5a37YEJhJD2QflC4bKSpyTC/7+hzzeregpmllj5zDzAFUFIli4dRvPo2VgeY1NGTHkymsrQvf215b7Fn2PK6SCsMo9wwdQwzUtYVxSDdf+eRnDbZfrF4q/3zgW0P5YHXWZZREAltktOFq4e5zMrczDJb9HuuvdM+fr9+pnf/fnvYNsjOdIQVYkBO0cRw2KPTdeigDVEPfRaqkZ9Woh7NTZiWU54tJdU6U22RjnwIwDwBYKCsBoby1WF2kIZyCE80o6UpzHE/vxjsjX9sl4J49iibTQczR0Xj+8X8hz+fNraV5Ruz3Xd7vSJ763j+CXv9YPfPFWmf3lJh9i8rs1dMqlY8p/6zf8/sUxDX2l61/pm728JqfUeIv/P8e/Lv6V8QX2EOwVbWOxbxy6Ijww3ZSeoFvNd09no3K2SqwZfwQ1k/BHgGQJ4NKKPA3Reik1J5GhFuUSCw7e
TGtxv8tpDUwixgSIBfI8rnhik0ZfEsthjoYHZMv9v6Ry3KrvIYtnvd76Lj5hqQnIvr9TW7h+Iw3/htd/Or1VXUJk2u19G/k9uijutgIhx3cYMsXFKlRG9wvbhacFOx6TjrzKtjK8EOVRThQOednW9HZ48jruTXzaQ3Smt0BDYRrY58OeDk6ITVTTl0yS7GyCHXlEGpXUR1jVgqIgUdFCuwuEXUG0nhCe/3RSubxA+oVEyzAJVJeihV7lvXF5WWcpelIkKVU7PvMYChjzzo+8DK3hY0b/DT6HH09rjXo46mcIENkPxeel6uK+0ziIvnrjWZZY5BiSFJEbLaUMJVrKIKzyk6XXKiqoIJ4pCWgj9KNwwCVkQyOZTXaxjZs3AFVK2Vs9Q0TdIgCpubSYBwFLADFDUGDQtiQABSf5FizsCfOb8fnascd0mptNjfJ8HCOfPQBleJ+b3kSfno3v2xrdSJ3qcD3gSIuygxFLtJ7jTYfPQma1NOPaXG+asUE9/L3OywvqJCoz48PH1aHDwpT9jw2PrGTdNs065eMyOdFpC7UdZWvkk1lNqTognB4JqX8/xTbf6RFIj7JKmHFwejj1LuzPpZP1VLUw9CUQKcmPVgkxvalcs+MmhW3itb6GfjX8iPps9c2B/YM099uDym63sweOpPjqQbaTOkYoSuo/Qlrin1ehO6+mKvD+BXha4mgyf30jlzYN8N2V0xHFVvN0WVtCETNkARB3cibQM9J3u+lxrxIA0UB37l+T+UC/1Z8UX4dyb9OKqOnG4T0SRFZ3bZRghtv0P38ZqPzyp7Awy4vu93A292KEq4l+h37fiQnbvjFDyK/MZgg0yYMJ8Ikv/ry3iIrrZmbjjwvHx7JqLEHifHNkYgHnwMPkQv2hqNMBiH8TdXETWDviC54uXffbG1iWNETTQXYARwFOjcNhUXSPcf0nDadjRYiF9GTHDk3G4hmCwvzlPn1fVWm2Jlks+N910i2xEEj6geZjMILaOtGyU3XlUY2d0f7as89dkAuzek3UqwICbUoCZ09JKo3Ca77QaPLs1BFqjGPy0KJ0dUCMM0O2F1fCUh3BtVOzt7bV6wBZEDb8UmSQ70Quha0pmS98sasUlEqHbCUm6aZ6aONWJ4rqvZWZcHFZKpszzqdYE+iQvQqKOwwDvCAjgZnhLQi+vMsEKVkY0H3Mun9M5zI6+oINVvqvunbXVqRu2soioen1e3UFbXEFfXoBfhmwcOEX0Oaw3pni5nFJ4c5Giw/jnwEJF7RzWt1x6Y6NBPf1H4tUx309f9sFY9FrCIrksgVh44u++Gmcz6ggJIRX2AKlwt5UKluemtj8ayYPAt1f5McQLVKUC54lApCnV0+cpRz+4v5CCbU8RKSrOXnBULKyberENDcYKxUErxiKoI1006SDNMfUSBAULCVmiw4GX0D3vbKROfBwuLNpaRB8tK2VEvrX3RZMNCezcLfV1V0AqVbBjhLMWWNvUDuztMR4AvKJmawh7Wd06mr1jtdhLx2ef/WD7T7+/84c5/Y98gZNUzBoHccF06U5/trgWy5JSeMfXB2Y3nqCPhXEZNpRer8U5MaevveLWdDW577mfCxqQZCYk3W38GJkdBpWRbftzszZarWCqbjozYoZt5D4SFuBzcaORGMRi8uUEEJMtrN24HLk9RIOeazlXZA7p65idptsxS5+RQCozpKK1pav2wAz4eFtQbAZkRCuoTbYSpUkNZseQSHZfcY2WcY3qGozSga+89lbmTfuWrbPKsHlJCrSAijwylnGycWJnJ1FtnqZPehYm1KwBTMaYXsWxSQ9R0IY+L0joRU3va+2t0B6kfDD7JbINs6POiKfF4Bpm1vihCrdRJ1fv4+M1eWevTLN8P4V7l/owz/d3q+lHvJ2wZMo03W5xpsV+5Ner5QOnsjbPXH+zr1h4E3+rxYdnshXBa+X2jetRrL8tgz9/6SDOHEWHqUmenWTqgLrgYZqv2LMhBSPb6d/PTMBpyr40nWCztas+nZdgduks6+Y56jFpgJAZDIuodZG9kCnluP/nweDz5enp1rMyCvpVe8u9Favp60ivUmB554VLMvCZHPju8ZzEj/4vP/6X8B/pzFP+uxOfixvVG4k+cimHkp9rO7YkJ5AYW7DwNL+Tivr7avFgAdqH8Kr44QlVmXJdy7Vno1c1ruc5EHnkSiR6sNPbCDevqmh1gLhfbY9Npo1yeiavNjZjY+ZZNij6Pu8eo3rCdaMdWMXZ9Fy9srSLBbjDsd3quehOR6WsWQUTr22f1FPz2uhQL86JG55ETlpZLs1r07Ypn8p0HDlAwEYqykd+uqJ04p4ZByNJQCKUTQJ16gXBHqabFDt9IEJOgIojIh7VXAjUeZpZAkuuxUWaCXr4qc5gu6SyhYgZg7p5MMpR5CJvjHO4qbK4OkBTlFwVB6AIYaGS44+y+TPFJgV+wmGjTccp00kq5r9AOKfbYYsP5XMrTKllRxqATmbWoxGQvGVNJ6XFYjcwhLUj5iXVEP/gWJQGYToUE0ogRKgIaDCrIHCm0qYoGVDg6k+JjicJalL5axTqyhWetPywYx3JAf/k7rqfXaXhqLNRexlldPVmNTpYZJYlHhTr3+z/1JpRh5CN/lvF0Cip/tT8vIMive2fVY7c3yvb67Z66DCHoI3skxXw2kyOXj2Azu/JYkwKQuK7ckR2HypvH7fVhBfZrcq+aLpAwvZMZ/cjpbowzsI417xWwE1gX97O+BbzBJcUA+qsiFAl1fLNETjFuoQuUpV7X1mc6M0/SdpkZ1Vwsh+em30wcXT0bgjXULiiRQRUAABsAf0Rqv+PoDhdBV6qnITOg2LsHkD9Mw2E9D7oBFf1fpRvM4x2MqgJ4DacmumZbJQ7ksZ5Eb+Pv0Yu/yB4zm5t2kFc8qCdRuw3nolvtxHaz3VpZsEUw81S2u58p1a4LGBd3djMoT9toQjXgMrU713F60xFKr9jSt+v5uu4RtKtn2/6uy4pobYGuUM9EBGR04+nlIvab8Se1v1eXdrmKYHJr2BaYh82RY76J7NKOVxtdjxfMTWdCzTMI8L8wqhkkn6yDhJuCtREqHIxXUdQcyppZmBmeo9GxKsGjELlkuBSdBuCYqczfs5B6xGsGdYvCjO6ozkJ7YnRFDz22k4WEwxv9CM7TYUph1U2nV490r9+MwMpMGkMZcDfNlE4T70o5p2xnMfFoMP2QnlUzI5WDvnyGWhUwfvQqRR39dlOIZIG/SgEgYA9fM/0LyqBwhYE9uhOpluwOn0q9qPtL/W5KT3ibV7NqVU7L0pQgwLPhXkyao+BPTiq5mDvbL6kVlVPvEzl3002x5/2wcF9G6GLjKgotQEhq4fKwufvpvdm63JW1g4eCDxO7i7ecTPZmI7N3nO3tVvkyobOwn13kr7bFxbhWa2dtbl3d84Uc+PSdvR8tx3N6ps0hL/d18k01CXf8vZkfItZpDx8Iw1BW6tSYTUFJUBmu6Hj6LZPCzYq69G/4k1SlHrse2I9TTdRIpEDl+ub0QLvd2qVFU9Ur6wzvjD/6/N/IpX5r53+LnjiLl
VyUHFltbgQKkCq2HMWuokviNqcb48S9Jx57oIci6bmzVHthq8AOkmAmdhDJ7VADhyfyKT8k4N39tx2xIL/x5KjTE2JWJzeRN+UeJqLIuxYDTd6DbJ12X5jdmIvBNI5TO8YCfwJ5zd9pVDkUtm9Z3+jifNNs0H6tGXjFxNE+ABJ7AuXhpXST9e5YqRx1G5XciaAChQqOjB5sZBAIbrP1MN8ielpslKY1cuNSDnZwacReJoPUH7oo1oLF0lS04EbhlNJjEdp0SeHYFLXHMSsam/AKUdUUS/3C216dskI025g5l0HQtpQPzaDEAfRQzNKyDYIJFQYND887uS+jTs5lAUAg+TwEg6VrFnJquITrZS2WsNlvR+VC6BGJzmQ7OFHfM2o3b5QvqcmRqljtVp/W5T2RDIo75bPsU2Uxc/ZqGT72QDlLSSazj3z/pfKZkpUI0l2ce+FDL1BhxSMl2Zhe4V4b3XnJ7WXN3ezMJx/V0wGEeTPfhr69Sl9qPlIqqgAx6sJZeDjqD4s7fT+krrSh+4P0EtKaei8bkryokoMCwjDSjJC4QMCk8FNK1ARZ+un+u3QibKuz2ePgJpDJsNXo/ZN9W0nqyGD/S8eO6o/RnfvDd43cd0FQwf8WNvZZoDoTSXKZMhXpgz+xRZ30gFHsSZfaQQLuM53ZphnluoxeKf/8+e/KH+jfFH8YNVZkBNRvDVI6S5RlZ1LPNOVznkGWLFq/tDymGFhW7efZJ2o5Vsi0C/6TbRdRFfZy2Y/inAZ/urFuxVYVGW+4xRia/s3MBO1Z/wZxYSLsuVNZgG8incGo13d92c0pGPC/XoGHBHD/JWMHV9ZsV7xFlychugRAiY3wRXRasafjbURMXEhWm8HF5oVWytME2Y6/xuXT7fo1Tl1YTmHNuJHry6iiQqluabocbBcGPGx848s1hY9FR02F5MSAidmUPiEleHUZoc9iu2CBDuL1i0r7aXIO+QVeQHdyvvwj+e3dpyGHYrLph+DpnhcJiHVSZz3Fgh+VGGWyojauwF4RsiUZFMZAuocGgYP1odEQZWEJBPQzgpX/KFeCR4R5wiTZw2xWAF1kwWKAkp2IJ9amkAfxnUc90E6hSFdRrwhHWLPFuUniKpaFB0CKoMRSV6loeNgLWBslX6dKdlkVo5SyYFWkgbK+MNha0iuZUapY9JvSJDuhcNgABA1SBlAupVhkOzVmz2W54lkPKNV0kuYz+2yVKdG3dzIZKCoC4qeMq/ELfOrp4jno71Um7Y0U5OiDmJtkZZ8mIrpZZ6KwwudW5VnQYGMkrFyO6QawDVP91YNDM2numeMvGQpfrhhSpv4y1Ku0EuV8L3vVybTWpXO1SukbgP9clfSTEVU0VIyLHKsWA6Klrug6lzxuhsUqBjLoEyBfRnVqUL1gZ1p8yv1opayhOFD1D+tJNknMnrx7L8sXzswhsyNHx2t9abVn5F+pqsT08uloPMzpomrr+sWpoYOuGhf0OnubIrfgB4EOSzNMxL5bpapoqUDWn3lzzzS69pL1SKuhqk59xqM1JYblKB95XxqhU1mIAsayJojGwasE7C+0C/TVM9MYu7R6NmTDXrpPucMT4FIKhVR7Jx8vq9eT0aQXJpjcjxiqb9TwGL6/IVXTBjdVB4drBLNQEGOiCRnQcspzwRZUK0WZUEUGI1IXzLlZTpPdKZg5Ai7BKp3LqjJ2QwlL+Zbqjq88/3fyu/rtnf9Od/gQvKfOaalTiokKg6icub9ko6ZOYy1W3FiSXJ9HQAjzp3new+ykrbFdx//gkl3Mk/7DBxxPeQMLNMbNP3/Y/Jnb9i4OLldLHkJvcbftoEhY//sJB+V1G/sCNoAy8eNM5yWAqh/Mkg6JtrraPAGs+vqwo3Ju2eidswB9aT1gf7uko152s+zhQHy/orTVeDNpFQBKdBP0fKTXpiroqCaF6iuIJKEYBnSWXepxNFmQQYRZtS50mdPdBqUYmqWgQQEIxpKKcSSILpaPsWAYGtsAK0BB66MgKQ7saeA+jHwQXO1dXivT8+rQ5HCUD4CS9Ip942dpWxwBeMrYVpeu9VNjDlaaPZfqEs5oSsShpJjthuRukqIAtw+dc5QnAn/1gt77B/+WIoSElQgevUJfwKcaIhG/I9Mka4K/ewCMVhJ6Jp/v21PV1ww+Domui4QOqwdG1aNwLsHLUElhdI8ibpPoyuYhSSlLh2hcSTHEwvcC5Rg1QFqz4j96VYRPxTxypnTSo10f9Qw96HfcfqLGTn0sTxvresnPf/b4e4Nf0ipdqItCTkLPngx+3MMRLrENtWEOKmw9JXMKWXVSDZrdWWVm7HkwTvc+XXyyb/InpqKLmoTip47fKUv9BhUUqpbiQcja7NjeU7pHZ5GuwoTDrV+06i2XH1IHdSlkRY2NR43xW89/X/60frbzX1BjUAa0jHi0vINgnDsvOnm7wi3qOesQnG9LkMEWf94f9CM+lLUM1/Sqy+g5seHSox8VF1fx/xHFFR9WtL5Y2jLkSVGxsnVJXa1fyJyciUhvjNj+uC+9mRFjWzS4YMmleA7wRtZR+ZA3OdvmgOo9L0wYVDA1SPlp9wAQJWWejpL87j7UrsdAOlIbl7WFHIpatmWG1pYymGUqUckmxxSlAF10mJb2oBmaFRnjjAYllYMDi/O0oDrX7PPePUCbO+1lTU2Hp8/1fZ7s4imzGYVVdJRUONLfGSYgl0Or3/ZJj0rChDV0MiOdzDRl9UzR3Tx0YoEufCTXQe1aX+mjRn0tQbEOvQ+bwlsR3Z+e5xdVIUSoe+88AQGRQvQyfU8/mLZ3V5/R9UD5JqnlAaP9WCQM2QioBTVKzEgehjzUiaYg3djhR88/W74/3/vpLH3kKSS3aZ7jHC9KkXs6eAU1wD3jHqbiUf7LVe/X/GuH/ff3DyA0oM006Fk6evld56r+YjX4MZ00VSLbnIrf4CKRqqLzUylMaU5cstJq6E78y7vhgQ13B29Dokd8/fmfyAv9KSER+02krW/6D2JlOtj+tovGsf86x7qu0/fsb8vmIlZvU1SWw/kN573PhghcvF0yc4OLyYjtSy6u2Ua8QwfEYek5lnSrx1ydsnvU5QtsDoMBBbjsYL1zgYy5KBS6opVwJPptOj1QptBvLq5XNw4wXQ37NCKn6Y97Md+tks8PqaZIG+/oekPfFtUdRSsDSG1fT1O6rFJkGJTK3MPzniomBT5aJeKm3EhK2gChzkceSB1APyvmLBhmSZ+ykCAyARU2WVZ72wyrCmYOqZ6BzqQjhFZROSa85VU7JqrMQQOaFGfKM+MDnkBRg84Wrcu8Ouv5MqG47SrFrn30hGMJNaPvhMJrqV/zg8KulnCPSE9yYI9hQO4KU9LRpUMwXBZn+8zA+3rdfn5viN2axiK/VA8OLHS20v5Q5+9Mxl/Jsr2E2t7KpRnUb0NAFUox9uHZ68XJ/lSoUNFznWqVzUPzlxaTV1ygLq/o5WnmTLRZNao9cHdfG41fWiZy1MK1qja/DFETSi1e1SqzUbs7AdYeWh5ZQlHmSE2Ox69bNbXZ2gS/l/WPmiGFGUqiSTlC
NqazWGgLjoFzstz1Izzf333+p3KmvyCOEYsHQyZNwRIJrGyWvkSJAiDYOrZjS6jbcPSdCfN/OWWeYhmwYcQJBfVSYHvONK0rXiyvV8uWEfcoVKbM+uZA2rLM1XLBNg+nYtV5NlAJc8IANIaAMbqGSUbxWb+6WU7j0QUAB/Oa68grRd1zuZ3MxMrrakA1ksEbAnyOmtM1xGpWV4sVIHKAxWKusrri310NoGeHnd/11XouOhErjGXPk1/JptHlj6dZW+F/KIcXVL/L3thmFT8zOsfoTmZ1ZlmYdsw0O8eoK3RARdcmJZ29Y54JWDGaOaNJ+4lIBT1aQRU6E9Nli823pqA3UrwKBvYdGv7YQmvecFv2XnMlEB+F86uiVyOwRzOgqLngOIcIj92UiKwl+jB2K2MVS6q7sT+GAPUYGxGn36czkPSCouTUYydMXn2zjK2wolSyHCZVv4ZUlTrJf6a+sNVq83jc+rpwOfy52SCINQcKuDNZSb/VlDp6i+SoLj9e5PN1kmW73v947t94Itczdxg8wHysNp/sZRpKnrnXckDtkJ5AEwdwcq/M2eR61PS+9FqRzuvXUAw5mdMB6Vfj7L55uem996R+pWxz08sURFs67zp8A3gxeFvMlWK/eirJYHTh2OMIOCDqFjW8wxMBzvcfP/8evfGv0LfcE/d3du7MNad9LhViPF136qybD++SDYs+MqJkabpXdbPGhP3EEIh5JCA679Vk+wM9RRKR3nSaVku7iLpNm23ZvgVlYMG2HXTgX+LGgOI+dxPdMPDGASyiy/bEopDRqxHAl/g/21LPwLTAm9VA/JhIlaS/kisIN1BCuTwc3BAPo4jb4kUbkPyHJFUpHrrZbmqPgo+EJ0GlLjV8qbC7qsph/mBq3EuqfMz+sC9DMlJ+SvfBO4wYQq8SszR7mAw8u6mWVQK9Mr0ydQkIOFBcGhZ7QGmwyDr8/tivzwABCuwEUEMml7uuOPUzuNnQvc07HVnU0ZFjmJTo9sUEJCYNTgvYLKCxWDTunrpFoMAzyCo61mGULLyaUI0EvrQI4oPv5LtQpcxgheILyDcovaGERw+8uKSHKthcBewoqN0NVPf35MiYQuefvTzy9VgVnxq+92QY/qrcf9S7finx7xaDYzHYK5bz8KiYvlz8lUHz3uvavmb9pYEmbSX2yuITcuploE61gV5Lrt09VcJjUcboguuP/sjIXGqW0QKTGN8TMG5QXDB5FuTAVVrkWlPCsrZMLCUjytnUKjtvMw8XQnknmiKlMwz11cn88oEcz4v6J6lmHbSyrnH06YpOy/Ju7fdNWum+KSv35cxU3k+drpPGigysQZP9QQ2353Qsxf5Rsz9kq6VrIPeKhvqCwc7PPf8D+b/U5Y7ZqXfGO4ud453znX+x8693/tPO/9j5PgXEWkzFvliKtbgnNlQsvS7e3Nk53Lwi0EBQhZNAPWC9OaTi/GpzIZ/xAmA5vB5aqtxYsvzqkqrcgWFBN8uWBBT/gYXsmwUywH25vrJzNqy/n1xfra6oowfRFltpOpEwJU6iNbG96qZ6QEXdmBeLZ6LTW+emZZ60dLLQxgy7w8m46jPGghq77A/psM/FIT7uyi5A22LIiLEDFHoNqx3g1c31koeVpWhaiBlw+gXOxAyv15Y+yeraRiGKa9OnazC0g/615SV7Q6+4T1em2TCSmr9AiTrPJmto/uwd/BxV+fpLOT0uv1F/8F/vU/+ozpYN4JPJPzvb7LVLN0iycjJU4tfkl66mp9a9PpgEe7pZnCrt6blJPq/ocbBl5cWEavLjvfEd6vmHTuXUGtrC9ZNAndfnS+eWiLH9wrCwmxjShZbzbOlHyHz+8llhF358ULr2IMucG1J9uXQeauoVHCwTwacoVHL9kR++K0ZZOoYdyY/gr5fHZs/QO/6CpMc5uN5odt1CGIUq1s/Rl9P0kr4SOtj13uZMpnf+Vm/2pcaoXOn/2M7NPWXMpFAN9fe6OIBQa8irs0FIH4L3dk89VLPZN3rvPvxTTMv04V9YrX/90T+6ntQfn3zw3U9cnR8Uo/Dut6/eppP+m40xr1ZVvT771b+biM1k8uQlf+/4o4d1Qxk1yYAs79ct2Jyr1aSdzUJYurEMY2DDINMj/+il3hiBaRiGsKjnvbiiHBzcS/LElKeDHtVzInXZKz+SyMf0GeeuHgbfa77SJgOjSu8Lam5SKf+6qKhRdI95jPqL3g2cvaaK3f6gPC7yq484HUbi6xiPruvHjw9++MfvFiKxB1SGnNmjqmfwTLyxhlTDD96++rb0afib70FUblEWf/mwPbg4+/jea9lPfjPra/1N0XzhC9h5JtkHX36n9ZTOZ/b1+Xx99DNPTv/GLd/zlu95y/e85Xv+P+d73saZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnPn/O87Aj3tHf0N/Y0fu7PQOegf6G9//Gn7u7Pwf/gBC4AB42mNgZGBgAOLrh+yy4vltvjLIczCAwLVpRt8hdNd2Bob/daytbCAuBwMTiAIAS7ELYwAAeNpjYGRgYP31PwpI/mYAAtZWBkYGVOAOAGrhBCwAAAB42m3PKwsCQRSG4W+W3W4x223aTRq8gMltWmwaDCJaFkHQaLIY7IIY7dsEwWITi/4Ag128vMMOWBx4+M6Zw8ww3l0lsbyN5B8tk0YNLfoTLtQdLDBAFn2sUECUzHQjy8hhjQl7FTIF7jFDNJBhf4cHdZ28kk20UaXfk0uMELpz9s0iswPZI7fkFDH12Z+r687/wd9eUvD8pXljzKc/TkyfJ8Mk7SyYSV80s1AveNpjYGDQgUCmQ6wu7HGc87jf8F7h3yeUJyolXiE5Q3qVrJP8NCU5lR61OxoxWlk6cron9KsMFxnfMa0w/2JlZb3HzsIxznmS6xv3OZ4LvKf4fPC7E1ARpBOyJSwufEukStS2mKy4sISupH0pn9LVMqOy+/DCOQA/+TbcAHjaY2BkYGBwZ3JlEGMAASYgZmQAiTkw6IEEABOHARgAeNqNUV1LAkEUPaMWSmASEdFDLL63mrkmBkEEPhTEkn08t36ktLm2jtZTv6Mf0lO/oOwX9Fd66szstIkWxGWWM+eee++5swCW8YIkRCoDYJMnwgIrvEU4gSyKBidRxb7BKeQxNngBD3gyeBF5kTM4jVVRMjiDqqgbvISKeDT4FWvi2eA3FMXE4Amy4tPgd+QS6Qh/JLGRWEeJnrbpxoKLEAG/I3jw0UMTV7hEm+HyBBiQbeOU55oan9mQlTbrVezhHMfUnxDNV23N1M0rrBnFBW8hhvQRoM/s9CQXDTJF7fyH7VIp6Vrpx3GFzd2qzN6y642eJ9Ehqzb0uL0Nh6eCMnYZzj+8//ZOB0SediyZs3BIrqd1FlEfrT/et0u95Jwhaigw7nXYZEI9f1prkwnpo6A9etxCbSrjTc+oVu94pCda2CGncg57l/kGNSKHzPcfb1HdoVbtJemgqfsPOG3EWz3u3sAdGbVNyAr/CzM7bOd42m3Mx04CYRhG4fO
CgAURvQa7qP/8zFAsi4k6NgTsla1KYgwbF168Ccp87jibZ3fIMOqnzyvjOgZllCXLIksss8Iqa6yzQYVNttjGEeCpEhJRo06DJjvsssc+hxyR/D5OOOWMc1pc0KZDl0uuuOaGW+6454FHnnjmhZ4mlFNeBU1qStOaUVGzKmlOZc1rIf/28T14D1J84euz71zs/vTO/RuY3qyaoRmZNbNuNsymGaf6JDVKjZKDIVmuNI4AAAAAAVpw2jcAAA==) format('woff'); - font-weight: normal; - font-style: normal; - -} - -.weepeople { - font-family: "WeePeople"; -} \ No newline at end of file diff --git a/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/two-sentences/style.css b/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/two-sentences/style.css deleted file mode 100644 index 22031bf2cf6e8fd83b62a6868a6fb2426bd711af..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/server-side/fill-in-the-blank/scatter-plot-colab/two-sentences/style.css +++ /dev/null @@ -1,84 +0,0 @@ -body{ - font-family: menlo, Consolas, 'Lucida Console', monospace; - margin: 10px; - margin-left: 20px; - width: 1130px; - background: #fff; -} - -.tooltip { - top: -1000px; - position: fixed; - padding: 10px; - background: rgba(255, 255, 255, .90); - border: 1px solid lightgray; - pointer-events: none; -} -.tooltip-hidden{ - opacity: 0; - transition: all .3s; - transition-delay: .1s; -} - -@media (max-width: 590px){ - div.tooltip{ - bottom: -1px; - width: calc(100%); - left: -1px !important; - right: -1px !important; - top: auto !important; - width: auto !important; - } -} - -svg{ - overflow: visible; -} - -.domain{ - display: none; -} - -.axis{ - opacity: .7; -} - -text{ - /*pointer-events: none;*/ - text-shadow: 0 1.5px 0 #fff, 1.5px 0 0 #fff, 0 -1.5px 0 #fff, -1.5px 0 0 #fff; -} - - -#graph > div{ - /*display: inline-block;*/ -} - -.active path{ - stroke: #f0f; - /*stroke-width: 2;*/ - opacity: 1; -} -.active text{ - fill: #f0f; - opacity: 1 !important; - font-size: 14px; - -} - -p{ - max-width: 650px; -} - - -.bg-tick{ - stroke: #ccc; -} - -.tick{ - display: none; -} - -text.tiny{ - font-size: 9px; - font-family: monospace; -} \ No newline at end of file diff --git a/spaces/mindwrapped/pokemon-card-checker/app.py b/spaces/mindwrapped/pokemon-card-checker/app.py deleted file mode 100644 index 4c93635abccf03c3ff442cad3049976fd9328a9d..0000000000000000000000000000000000000000 --- a/spaces/mindwrapped/pokemon-card-checker/app.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np -import gradio as gr -from huggingface_hub import from_pretrained_fastai -from lime import lime_image -from skimage.segmentation import mark_boundaries - -learn = from_pretrained_fastai('mindwrapped/pokemon-card-checker') - -def check_card(img): - pred_label, _, scores = learn.predict(img) - scores = scores.detach().numpy() - scores = {'real': float(scores[1]), 'fake': float(scores[0])} - - print(np.array(img).shape) - - # Lime Explanation - explainer = lime_image.LimeImageExplainer() - explanation = explainer.explain_instance( - np.array(img), - classifier_fn=classify_cards, - labels=['0', '1'], - num_samples=1000, - random_seed=42, - ) - - temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False) - img_boundry = mark_boundaries(temp/255.0, mask) - return scores, img_boundry - -def classify_cards(imgs): - print(imgs.shape) - scores = [] - - for i in range(imgs.shape[0]): - pred_label, _, score = learn.predict(imgs[i]) - scores.append(score.detach().numpy()) - - scores = np.array(scores) - print(scores.shape) - - return scores - - -demo = gr.Interface( - fn=check_card, - inputs='image', - outputs=["label", 
"image"], - examples=['real-1.jpeg','real-2.jpeg','fake-1.jpeg','fake-2.jpeg','real-3.jpeg','real-4.jpeg','fake-3.jpeg','fake-4.jpeg'], - title='Pokemon Card Checker', - description='This space uses a resnet34 model fine-tuned to determine whether Pokemon cards are real or fake. \n\nAdded [LIME](https://github.com/marcotcr/lime) to show what contributed to the predicted label (green shows what contributed towards that label and red shows what contributed against the label predicted).\n\n[Dataset](https://www.kaggle.com/datasets/ongshujian/real-and-fake-pokemon-cards) created by [Shujian Ong](https://www.kaggle.com/ongshujian).', - article='Can you guess which cards are real and fake? \n\nI can\'t 🤔 \n\n([View Labels](https://gist.github.com/mindwrapped/e5aad747757ef006037a1a1982be34fc)) \n\nFeel free to like if you like it. \n\n![visitor badge](https://visitor-badge.glitch.me/badge?page_id=mindwrapped.pokemon-card-checker-space)', - live=False, - ) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/mingyuan/MotionDiffuse/tools/evaluation.py b/spaces/mingyuan/MotionDiffuse/tools/evaluation.py deleted file mode 100644 index 25d7db002d6507378c4fe1fc17c52f206f6ea9e7..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/MotionDiffuse/tools/evaluation.py +++ /dev/null @@ -1,278 +0,0 @@ -from datetime import datetime -import numpy as np -import torch -from datasets import get_dataset_motion_loader, get_motion_loader -from models import MotionTransformer -from utils.get_opt import get_opt -from utils.metrics import * -from datasets import EvaluatorModelWrapper -from collections import OrderedDict -from utils.plot_script import * -from utils import paramUtil -from utils.utils import * -from trainers import DDPMTrainer - -from os.path import join as pjoin -import sys - - -def build_models(opt, dim_pose): - encoder = MotionTransformer( - input_feats=dim_pose, - num_frames=opt.max_motion_length, - num_layers=opt.num_layers, - latent_dim=opt.latent_dim, - no_clip=opt.no_clip, - no_eff=opt.no_eff) - return encoder - - -torch.multiprocessing.set_sharing_strategy('file_system') - - -def evaluate_matching_score(motion_loaders, file): - match_score_dict = OrderedDict({}) - R_precision_dict = OrderedDict({}) - activation_dict = OrderedDict({}) - # print(motion_loaders.keys()) - print('========== Evaluating Matching Score ==========') - for motion_loader_name, motion_loader in motion_loaders.items(): - all_motion_embeddings = [] - score_list = [] - all_size = 0 - matching_score_sum = 0 - top_k_count = 0 - # print(motion_loader_name) - with torch.no_grad(): - for idx, batch in enumerate(motion_loader): - word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _ = batch - text_embeddings, motion_embeddings = eval_wrapper.get_co_embeddings( - word_embs=word_embeddings, - pos_ohot=pos_one_hots, - cap_lens=sent_lens, - motions=motions, - m_lens=m_lens - ) - dist_mat = euclidean_distance_matrix(text_embeddings.cpu().numpy(), - motion_embeddings.cpu().numpy()) - matching_score_sum += dist_mat.trace() - - argsmax = np.argsort(dist_mat, axis=1) - top_k_mat = calculate_top_k(argsmax, top_k=3) - top_k_count += top_k_mat.sum(axis=0) - - all_size += text_embeddings.shape[0] - - all_motion_embeddings.append(motion_embeddings.cpu().numpy()) - - all_motion_embeddings = np.concatenate(all_motion_embeddings, axis=0) - matching_score = matching_score_sum / all_size - R_precision = top_k_count / all_size - match_score_dict[motion_loader_name] = matching_score - 
R_precision_dict[motion_loader_name] = R_precision - activation_dict[motion_loader_name] = all_motion_embeddings - - print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}') - print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}', file=file, flush=True) - - line = f'---> [{motion_loader_name}] R_precision: ' - for i in range(len(R_precision)): - line += '(top %d): %.4f ' % (i+1, R_precision[i]) - print(line) - print(line, file=file, flush=True) - - return match_score_dict, R_precision_dict, activation_dict - - -def evaluate_fid(groundtruth_loader, activation_dict, file): - eval_dict = OrderedDict({}) - gt_motion_embeddings = [] - print('========== Evaluating FID ==========') - with torch.no_grad(): - for idx, batch in enumerate(groundtruth_loader): - _, _, _, sent_lens, motions, m_lens, _ = batch - motion_embeddings = eval_wrapper.get_motion_embeddings( - motions=motions, - m_lens=m_lens - ) - gt_motion_embeddings.append(motion_embeddings.cpu().numpy()) - gt_motion_embeddings = np.concatenate(gt_motion_embeddings, axis=0) - gt_mu, gt_cov = calculate_activation_statistics(gt_motion_embeddings) - - # print(gt_mu) - for model_name, motion_embeddings in activation_dict.items(): - mu, cov = calculate_activation_statistics(motion_embeddings) - # print(mu) - fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) - print(f'---> [{model_name}] FID: {fid:.4f}') - print(f'---> [{model_name}] FID: {fid:.4f}', file=file, flush=True) - eval_dict[model_name] = fid - return eval_dict - - -def evaluate_diversity(activation_dict, file): - eval_dict = OrderedDict({}) - print('========== Evaluating Diversity ==========') - for model_name, motion_embeddings in activation_dict.items(): - diversity = calculate_diversity(motion_embeddings, diversity_times) - eval_dict[model_name] = diversity - print(f'---> [{model_name}] Diversity: {diversity:.4f}') - print(f'---> [{model_name}] Diversity: {diversity:.4f}', file=file, flush=True) - return eval_dict - - -def evaluate_multimodality(mm_motion_loaders, file): - eval_dict = OrderedDict({}) - print('========== Evaluating MultiModality ==========') - for model_name, mm_motion_loader in mm_motion_loaders.items(): - mm_motion_embeddings = [] - with torch.no_grad(): - for idx, batch in enumerate(mm_motion_loader): - # (1, mm_replications, dim_pos) - motions, m_lens = batch - motion_embedings = eval_wrapper.get_motion_embeddings(motions[0], m_lens[0]) - mm_motion_embeddings.append(motion_embedings.unsqueeze(0)) - if len(mm_motion_embeddings) == 0: - multimodality = 0 - else: - mm_motion_embeddings = torch.cat(mm_motion_embeddings, dim=0).cpu().numpy() - multimodality = calculate_multimodality(mm_motion_embeddings, mm_num_times) - print(f'---> [{model_name}] Multimodality: {multimodality:.4f}') - print(f'---> [{model_name}] Multimodality: {multimodality:.4f}', file=file, flush=True) - eval_dict[model_name] = multimodality - return eval_dict - - -def get_metric_statistics(values): - mean = np.mean(values, axis=0) - std = np.std(values, axis=0) - conf_interval = 1.96 * std / np.sqrt(replication_times) - return mean, conf_interval - - -def evaluation(log_file): - with open(log_file, 'w') as f: - all_metrics = OrderedDict({'Matching Score': OrderedDict({}), - 'R_precision': OrderedDict({}), - 'FID': OrderedDict({}), - 'Diversity': OrderedDict({}), - 'MultiModality': OrderedDict({})}) - for replication in range(replication_times): - motion_loaders = {} - mm_motion_loaders = {} - motion_loaders['ground truth'] = gt_loader - for 
motion_loader_name, motion_loader_getter in eval_motion_loaders.items(): - motion_loader, mm_motion_loader = motion_loader_getter() - motion_loaders[motion_loader_name] = motion_loader - mm_motion_loaders[motion_loader_name] = mm_motion_loader - - print(f'==================== Replication {replication} ====================') - print(f'==================== Replication {replication} ====================', file=f, flush=True) - print(f'Time: {datetime.now()}') - print(f'Time: {datetime.now()}', file=f, flush=True) - mat_score_dict, R_precision_dict, acti_dict = evaluate_matching_score(motion_loaders, f) - - print(f'Time: {datetime.now()}') - print(f'Time: {datetime.now()}', file=f, flush=True) - fid_score_dict = evaluate_fid(gt_loader, acti_dict, f) - - print(f'Time: {datetime.now()}') - print(f'Time: {datetime.now()}', file=f, flush=True) - div_score_dict = evaluate_diversity(acti_dict, f) - - print(f'Time: {datetime.now()}') - print(f'Time: {datetime.now()}', file=f, flush=True) - mm_score_dict = evaluate_multimodality(mm_motion_loaders, f) - - print(f'!!! DONE !!!') - print(f'!!! DONE !!!', file=f, flush=True) - - for key, item in mat_score_dict.items(): - if key not in all_metrics['Matching Score']: - all_metrics['Matching Score'][key] = [item] - else: - all_metrics['Matching Score'][key] += [item] - - for key, item in R_precision_dict.items(): - if key not in all_metrics['R_precision']: - all_metrics['R_precision'][key] = [item] - else: - all_metrics['R_precision'][key] += [item] - - for key, item in fid_score_dict.items(): - if key not in all_metrics['FID']: - all_metrics['FID'][key] = [item] - else: - all_metrics['FID'][key] += [item] - - for key, item in div_score_dict.items(): - if key not in all_metrics['Diversity']: - all_metrics['Diversity'][key] = [item] - else: - all_metrics['Diversity'][key] += [item] - - for key, item in mm_score_dict.items(): - if key not in all_metrics['MultiModality']: - all_metrics['MultiModality'][key] = [item] - else: - all_metrics['MultiModality'][key] += [item] - - - # print(all_metrics['Diversity']) - for metric_name, metric_dict in all_metrics.items(): - print('========== %s Summary ==========' % metric_name) - print('========== %s Summary ==========' % metric_name, file=f, flush=True) - - for model_name, values in metric_dict.items(): - # print(metric_name, model_name) - mean, conf_interval = get_metric_statistics(np.array(values)) - # print(mean, mean.dtype) - if isinstance(mean, np.float64) or isinstance(mean, np.float32): - print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}') - print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True) - elif isinstance(mean, np.ndarray): - line = f'---> [{model_name}]' - for i in range(len(mean)): - line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i]) - print(line) - print(line, file=f, flush=True) - - -if __name__ == '__main__': - mm_num_samples = 100 - mm_num_repeats = 30 - mm_num_times = 10 - - diversity_times = 300 - replication_times = 1 - batch_size = 32 - opt_path = sys.argv[1] - dataset_opt_path = opt_path - - try: - device_id = int(sys.argv[2]) - except: - device_id = 0 - device = torch.device('cuda:%d' % device_id if torch.cuda.is_available() else 'cpu') - torch.cuda.set_device(device_id) - - gt_loader, gt_dataset = get_dataset_motion_loader(dataset_opt_path, batch_size, device) - wrapper_opt = get_opt(dataset_opt_path, device) - eval_wrapper = EvaluatorModelWrapper(wrapper_opt) - - opt = get_opt(opt_path, device) - 
encoder = build_models(opt, opt.dim_pose) - trainer = DDPMTrainer(opt, encoder) - eval_motion_loaders = { - 'text2motion': lambda: get_motion_loader( - opt, - batch_size, - trainer, - gt_dataset, - mm_num_samples, - mm_num_repeats - ) - } - - log_file = './t2m_evaluation.log' - evaluation(log_file) diff --git a/spaces/mithril-security/blind_chat/.svelte-kit/types/src/routes/r/[id]/$types.d.ts b/spaces/mithril-security/blind_chat/.svelte-kit/types/src/routes/r/[id]/$types.d.ts deleted file mode 100644 index 576c1cc66441770a31a5dd243af87149bb5105cc..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/.svelte-kit/types/src/routes/r/[id]/$types.d.ts +++ /dev/null @@ -1,23 +0,0 @@ -import type * as Kit from '@sveltejs/kit'; - -type Expand = T extends infer O ? { [K in keyof O]: O[K] } : never; -type RouteParams = { id: string } -type RouteId = '/r/[id]'; -type MaybeWithVoid = {} extends T ? T | void : T; -export type RequiredKeys = { [K in keyof T]-?: {} extends { [P in K]: T[K] } ? never : K; }[keyof T]; -type OutputDataShape = MaybeWithVoid> & Partial> & Record> -type EnsureDefined = T extends null | undefined ? {} : T; -type OptionalUnion, A extends keyof U = U extends U ? keyof U : never> = U extends unknown ? { [P in Exclude]?: never } & U : never; -export type Snapshot = Kit.Snapshot; -type PageServerParentData = EnsureDefined; -type PageParentData = EnsureDefined; - -export type EntryGenerator = () => Promise> | Array; -export type PageServerLoad = OutputDataShape> = Kit.ServerLoad; -export type PageServerLoadEvent = Parameters[0]; -export type ActionData = unknown; -export type PageServerData = Expand>>>>>; -export type PageData = Expand & EnsureDefined>; -export type Action | void = Record | void> = Kit.Action -export type Actions | void = Record | void> = Kit.Actions -export type RequestEvent = Kit.RequestEvent; \ No newline at end of file diff --git a/spaces/monra/freegpt-webui/g4f/Provider/Providers/helpers/theb.py b/spaces/monra/freegpt-webui/g4f/Provider/Providers/helpers/theb.py deleted file mode 100644 index 71cfd23ff34768092e4dbe3ff6b719a946dceebb..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/g4f/Provider/Providers/helpers/theb.py +++ /dev/null @@ -1,48 +0,0 @@ -import json -import sys -from re import findall -from curl_cffi import requests - -config = json.loads(sys.argv[1]) -prompt = config['messages'][-1]['content'] - -headers = { - 'authority': 'chatbot.theb.ai', - 'accept': 'application/json, text/plain, */*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'content-type': 'application/json', - 'origin': 'https://chatbot.theb.ai', - 'referer': 'https://chatbot.theb.ai/', - 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36', -} - -json_data = { - 'prompt': prompt, - 'options': {} -} - -def format(chunk): - try: - completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0] - print(completion_chunk, flush=True, end='') - - except Exception as e: - print(f'[ERROR] an error occured, retrying... 
| [[{chunk.decode()}]]', flush=True) - return - -while True: - try: - response = requests.post('https://chatbot.theb.ai/api/chat-process', - headers=headers, json=json_data, content_callback=format, impersonate='chrome110') - - exit(0) - - except Exception as e: - print('[ERROR] an error occured, retrying... |', e, flush=True) - continue \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py b/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py deleted file mode 100644 index f226d5f50514ecb5ee3b4f1031df750609a56112..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import logging -import os - -import soundfile as sf -from examples.textless_nlp.gslm.unit2speech.tts_data import ( - TacotronInputDataset, -) -from examples.textless_nlp.gslm.unit2speech.utils import ( - load_quantized_audio_from_file, - load_tacotron, - load_waveglow, - synthesize_audio, -) - - -def get_logger(): - log_format = "[%(asctime)s] [%(levelname)s]: %(message)s" - logging.basicConfig(format=log_format, level=logging.INFO) - logger = logging.getLogger(__name__) - return logger - - -def get_parser(): - parser = argparse.ArgumentParser( - description="Wav2Vec 2.0 speech generator." - ) - parser.add_argument( - "--quantized_unit_path", - type=str, - help="K-means model file path to use for inference", - ) - parser.add_argument( - "--tts_model_path", - type=str, - help="TTS model file path to use for inference", - ) - parser.add_argument( - "--waveglow_path", - type=str, - help="Path to the waveglow checkpoint (vocoder).", - ) - parser.add_argument("--max_decoder_steps", type=int, default=2000) - parser.add_argument("--denoiser_strength", type=float, default=0.1) - parser.add_argument( - "--out_audio_dir", - type=str, - help="Output directory to dump audio files", - ) - - return parser - - -def main(args, logger): - # Load quantized audio - logger.info(f"Loading quantized audio from {args.quantized_unit_path}...") - names_batch, quantized_units_batch = load_quantized_audio_from_file( - file_path=args.quantized_unit_path - ) - - logger.info(f"Loading TTS model from {args.tts_model_path}...") - tacotron_model, sample_rate, hparams = load_tacotron( - tacotron_model_path=args.tts_model_path, - max_decoder_steps=args.max_decoder_steps, - ) - - logger.info(f"Loading Waveglow model from {args.waveglow_path}...") - waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path) - - tts_dataset = TacotronInputDataset(hparams) - for name, quantized_units in zip(names_batch, quantized_units_batch): - quantized_units_str = " ".join(map(str, quantized_units)) - tts_input = tts_dataset.get_tensor(quantized_units_str) - mel, aud, aud_dn, has_eos = synthesize_audio( - tacotron_model, - waveglow, - denoiser, - tts_input.unsqueeze(0), - strength=args.denoiser_strength, - ) - out_file_path = os.path.join(args.out_audio_dir, f"{name}.wav") - sf.write( - f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate - ) - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - logger = get_logger() - logger.info(args) - main(args, logger) diff --git 
a/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/megatron_trainer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/megatron_trainer.py deleted file mode 100644 index 8ab4657f73c6cda91e95637921edb84ccb76b3d0..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/model_parallel/megatron_trainer.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Train a network across multiple GPUs. -""" - -from fairseq.dataclass.configs import FairseqConfig -from fairseq.distributed import utils as distributed_utils -from fairseq.trainer import Trainer - -try: - from fairseq.model_parallel.megatron.mpu import ( - get_data_parallel_rank, - get_data_parallel_world_size, - get_model_parallel_src_rank, - get_cuda_rng_tracker, - ) - - has_megatron_submodule = True -except (ImportError, ModuleNotFoundError): - has_megatron_submodule = False - - -class MegatronTrainer(Trainer): - """Main class for model parallel with data parallel training.""" - - def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs): - if not has_megatron_submodule: - raise ImportError( - "\n\nPlease install the megatron submodule:" - "\n\n git submodule update --init " - "fairseq/model_parallel/megatron" - ) - super().__init__(cfg, task, model, criterion, **kwargs) - - def clip_grad_norm(self, clip_norm): - def _aggregate_model_parallel_grad_norm(total_norm): - total_norm = total_norm ** 2 - distributed_utils.all_reduce( - total_norm, group=distributed_utils.get_model_parallel_group() - ) - total_norm = total_norm ** 0.5 - return total_norm - - return self.optimizer.clip_grad_norm( - clip_norm, - aggregate_norm_fn=_aggregate_model_parallel_grad_norm, - ) - - def save_checkpoint(self, filename, extra_state): - """Save all training state in a checkpoint file.""" - extra_state['rng_tracker_states'] \ - = get_cuda_rng_tracker().get_states() - super().save_checkpoint(filename, extra_state) - - def load_checkpoint( - self, - filename, - reset_optimizer=False, - reset_lr_scheduler=False, - optimizer_overrides=None, - reset_meters=False, - ): - extra_state = super().load_checkpoint(filename, reset_optimizer=reset_optimizer, reset_lr_scheduler=reset_lr_scheduler, optimizer_overrides=optimizer_overrides, reset_meters=reset_meters) - if extra_state is not None and 'rng_tracker_states' in extra_state: - get_cuda_rng_tracker().set_states( - extra_state['rng_tracker_states']) - return extra_state diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Abacom Front Designer 3.0 Crack [VERIFIED]l.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Abacom Front Designer 3.0 Crack [VERIFIED]l.md deleted file mode 100644 index 254044c5d5dd26d3fdd95da0082af1e5b465ad5b..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Abacom Front Designer 3.0 Crack [VERIFIED]l.md +++ /dev/null @@ -1,29 +0,0 @@ - -``` -

        How to Design Professional-Looking Front Panels with Abacom Front Designer 3.0

        -

Abacom Front Designer 3.0 is a software package that allows you to create custom front panels for your electronic projects. Whether you need to design a panel for a simple circuit or a complex device, Abacom Front Designer 3.0 can help you achieve a professional-looking result.

        -

Abacom Front Designer 3.0 Crack


        Download Zip ⇒⇒⇒ https://urlcod.com/2uI9ZY



        -

        In this article, we will show you how to use Abacom Front Designer 3.0 to create a front panel for a digital clock. We will also explain how to crack the software and unlock its full features.

        -

        Step 1: Download and Install Abacom Front Designer 3.0

        -

You can download Abacom Front Designer 3.0 from the official website of the developer, ELECTRONIC-SOFTWARE-SHOP. The software costs 49.90 EUR and comes with a CD-ROM or a download option. You can also try the free demo version before buying.

        -

        After downloading the software, run the setup file and follow the instructions to install it on your computer. You will need to enter your license key during the installation process. If you don't have one, you can use the following crack method to bypass the activation.

        -

        Step 2: Crack Abacom Front Designer 3.0

        -

        To crack Abacom Front Designer 3.0, you will need to download a pre-cracked version of the software from ElectroGenesis. This version has been modified to bypass the license check and enable all the features of the software.

        -

        After downloading the pre-cracked version, unzip it and copy the file "FrontDesigner.exe" to the installation folder of Abacom Front Designer 3.0, replacing the original file. You can now run the software without entering any license key.

        -

        -

        Step 3: Design Your Front Panel

        -

        Now that you have installed and cracked Abacom Front Designer 3.0, you can start designing your front panel. The software has a user-friendly interface that lets you draw various shapes, symbols, labels, drillings, scales, etc. You can also use predefined and user-editable libraries to add common elements to your panel.

        -

        For this example, we will design a front panel for a digital clock that displays hours, minutes, seconds, and date. The panel will have a rectangular shape with rounded corners, a black background color, and white text and symbols. It will also have four buttons for setting the time and date.

        -

        To create your front panel, follow these steps:

        -
          -
1. Open Abacom Front Designer 3.0 and click on "New" to create a new project.
2. Enter the dimensions of your panel in millimeters (e.g., 150 x 100) and click on "OK".
3. Select the "Rectangle" tool from the toolbar and draw a rectangle that covers the entire panel area.
4. Select the rectangle and click on "Properties" to change its attributes. Set the corner radius to 10 mm, the fill color to black, and the line color to none.
5. Select the "Text" tool from the toolbar and type "DIGITAL CLOCK" at the top center of the panel. Change its font size to 18 pt, font color to white, and alignment to center.
6. Select the "Symbol" tool from the toolbar and choose "7-segment display" from the library. Drag and drop four instances of this symbol below the text "DIGITAL CLOCK". Arrange them horizontally with some space between them.
7. Select each symbol and click on "Properties" to change its attributes. Set the segment width to 4 mm, segment length to 12 mm, segment color to red, background color to black, and decimal point position to none.
8. Select all four symbols and click on "Group" to group them together.
9. Select the group and click on "Scale" to resize it proportionally. Enter a scale factor of

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Battlefield Hardline Crack Only Free Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Battlefield Hardline Crack Only Free Download.md deleted file mode 100644 index fcc779c93f61fcb080a17dce11a5a3dff07453a6..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Battlefield Hardline Crack Only Free Download.md +++ /dev/null @@ -1,31 +0,0 @@ -

          How to Download Battlefield Hardline for Free

          -

Battlefield Hardline is a first-person shooter game that takes you to the streets of Miami, where you can choose to play as a cop or a criminal. The game features a thrilling single-player campaign with a gripping story and diverse characters, as well as multiplayer with 12 maps and 7 modes. If you want to experience the action-packed gameplay of Battlefield Hardline for free, here are some steps you can follow:

          -
            -
1. Go to https://battlefield-hardline.en.softonic.com/, which is a trusted website that offers free downloads of various games and software.
2. Click on the green "Free Download" button and wait for the download to start.
3. Once the download is complete, open the file and follow the instructions to install the game on your PC.
4. Enjoy playing Battlefield Hardline for free!
          -

          Note: This is a trial version of the game that lets you play for 10 hours. If you want to unlock the full game, you will need to purchase it from the official website or a digital store.

          -

          battlefield hardline crack only free download


          Downloadhttps://urlcod.com/2uIcoI




          What Makes Battlefield Hardline Different from Other Battlefield Games?

          -

          Battlefield Hardline is not your typical military shooter game. Instead of fighting in war zones, you are immersed in a crime drama that takes inspiration from popular TV shows like CSI and Breaking Bad. The game offers a fresh perspective on the Battlefield franchise, with some notable differences:

          -
            -
• The setting: The game takes place in various locations across Miami and Los Angeles, such as a bank, a mall, a casino, and a desert. The environments are colorful and vibrant, with dynamic weather and day-night cycles.
• The gameplay: The game introduces new mechanics and features that suit the urban theme, such as zip lines, grappling hooks, tasers, stun guns, and handcuffs. You can also use vehicles like cars, motorcycles, helicopters, and boats to chase or escape your enemies.
• The modes: The game has 7 multiplayer modes that cater to different play styles and preferences. Some of the modes are:
  • Heist: The criminals try to rob a vault and escape with the loot, while the cops try to stop them.
  • Blood Money: Both teams compete to collect money from a central pile and bring it back to their base.
  • Hotwire: The criminals try to steal marked vehicles and drive them at high speed, while the cops try to destroy them.
  • Rescue: The cops try to rescue hostages from the criminals, who can either negotiate or eliminate them.
          -

          If you are looking for a new and exciting way to enjoy the Battlefield series, you should definitely give Battlefield Hardline a try. Download it for free today and see for yourself!

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Lianja App Builder Crack !EXCLUSIVE!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Lianja App Builder Crack !EXCLUSIVE!.md deleted file mode 100644 index a96c5ef8f881d2981fa7fd3fab208933d13208b4..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Lianja App Builder Crack !EXCLUSIVE!.md +++ /dev/null @@ -1,40 +0,0 @@ - -

          How to Download Lianja App Builder for Windows, Linux and macOS

          -

          Lianja App Builder is a powerful tool that lets you create professional-quality business apps for desktop, cloud and mobile devices using no code, Visual FoxPro, Python, PHP or HTML5 JavaScript. Lianja App Builder is part of the Lianja APaaS (Application Platform As A Service), which provides a complete end-to-end solution for developing and deploying apps on Windows, Linux and macOS. But how can you download Lianja App Builder and start building your own apps?

          -

          download lianja app builder crack


          Download Filehttps://urlcod.com/2uIbHq



          -

          In this article, we will show you how to download Lianja App Builder for your preferred operating system and get started with app development in minutes.

          -

          Download Lianja App Builder for Windows

          -

If you are using Windows, you can download Lianja App Builder from the official website[^1^]. The installer includes both the Lianja App Builder and the Lianja App Center (desktop client), as well as a Lianja Server (Lianja Cloud Server and Lianja SQL Server) for development and testing purposes, plus the Lianja ODBC Driver.

          -

          To install Lianja App Builder on Windows, follow these steps[^2^]:

          -
            -
1. Double-click on the downloaded installer to run it.
2. Select the installation language, then click OK.
3. Click Next > to continue.
4. Read and accept the License Agreement, then click Next >.
5. Select the Installation Directory, then click Next >. The default directory is 'C:\lianja'.
6. Select the components you require, then click Next >.
7. Click Next > to begin the installation.
8. Uncheck the box if you do not want to view the Readme File, then click Finish to complete the installation.
          -

          To run Lianja App Builder on Windows, double-click the Lianja App Builder desktop shortcut. To run the Lianja App Center, double-click its desktop shortcut. To manage the Lianja Server, double-click the Lianja Server Manager desktop shortcut. To manage Lianja ODBC Data Sources, double-click the Lianja ODBC Manager desktop shortcut.

          -

          Download Lianja App Builder for Linux

          -

If you are using Linux, you can download Lianja App Builder from the official website[^1^]. The installer includes both the Lianja App Builder and the Lianja App Center (desktop client), as well as a Lianja Server (Lianja Cloud Server and Lianja SQL Server) for development and testing purposes, plus the Lianja ODBC Driver.

          -

To install Lianja App Builder on Linux, follow these steps (they are consolidated into a single terminal session after the list):

          -
            -
1. Download the installer file for your Linux distribution (Debian/Ubuntu or RedHat/CentOS).
2. Open a terminal window and change to the directory where you downloaded the installer file.
3. Make the installer file executable by running chmod +x lianja-*.sh
4. Run the installer file as root by running sudo ./lianja-*.sh
5. Follow the instructions on screen to complete the installation.
          -
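The steps above boil down to a short terminal session. The following is a minimal sketch, assuming the installer script was saved to ~/Downloads and keeping the lianja-*.sh wildcard used above; substitute the actual file name of the release you downloaded.

```
# Install Lianja App Builder on Linux (sketch; download location assumed)
cd ~/Downloads

# Make the installer executable, then run it as root
chmod +x lianja-*.sh
sudo ./lianja-*.sh

# After installation, the launchers described below are available:
lianja             # Lianja App Builder
lianja-appcenter   # Lianja App Center (desktop client)
```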

          To run Lianja App Builder on Linux, open a terminal window and run lianja. To run the Lianja App Center, run lianja-appcenter. To manage the Lianja Server, run lianja-server-manager. To manage Lianja ODBC Data Sources, run lianja-odbc-manager.

          -

          -

          Download Lianja App Builder for macOS

          -

If you are using macOS, you can download Lianja App Builder from the official website[^1^]. The installer includes both the Lianja App Builder and the Lianja App Center (desktop client), as well as a Lianja Server (Lianja Cloud Server and Lianja SQL Server) for development and testing purposes, plus the Lianja ODBC Driver.

          -

          To install Lianja App Builder on macOS, follow these steps:

          -
            -
1. Double-click on the downloaded installer file to mount it as a disk image.
2. Drag and drop the lianja folder onto your Applications folder.
3. Eject the disk

            -
            -
            \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Rossington Collins Band Anytime Anyplace Anywhere Rar __EXCLUSIVE__.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Rossington Collins Band Anytime Anyplace Anywhere Rar __EXCLUSIVE__.md deleted file mode 100644 index ac2377acbdd8239036ad189a8759585c7672cdf4..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Rossington Collins Band Anytime Anyplace Anywhere Rar __EXCLUSIVE__.md +++ /dev/null @@ -1,13 +0,0 @@ - -

            Rossington Collins Band: Anytime, Anyplace, Anywhere - A Southern Rock Classic

            -

            The Rossington Collins Band was a short-lived but influential Southern rock group that emerged from the ashes of Lynyrd Skynyrd after the tragic plane crash that claimed the lives of three of its members in 1977. The band was formed by guitarists Gary Rossington and Allen Collins, who were joined by two other surviving Skynyrd members, keyboardist Billy Powell and bassist Leon Wilkeson. They also recruited vocalist Dale Krantz, drummer Derek Hess and guitarist Barry Harwood to complete the lineup.

            -

            The band released two albums in their brief career, but their debut, Anytime, Anyplace, Anywhere, is widely regarded as their masterpiece. Released in 1980, the album showcases the band's distinctive blend of bluesy rock, country and soul, with powerful vocals by Krantz and impressive guitar work by Rossington and Collins. The album features some of the band's best-known songs, such as "Prime Time", "Three Times As Bad", "Don't Misunderstand Me" and "Sometimes You Can Put It Out". The album also includes a tribute to their fallen Skynyrd comrades, "Tashauna", which features a haunting slide guitar solo by Collins.

            -

            rossington collins band anytime anyplace anywhere rar


            DOWNLOADhttps://urlcod.com/2uIa0o



            -

            Anytime, Anyplace, Anywhere is a rare gem of Southern rock that deserves to be rediscovered by fans of the genre. The album is available for download in mp3 or FLAC format from this link.[^1^]

            - -

            The Rossington Collins Band followed up their successful debut with their second and final album, This Is the Way, in 1981. The album continued the band's musical direction, but with a slightly more polished and commercial sound. The album featured some of the band's most melodic and catchy songs, such as "Don't Stop Me Now", "Seems Like Every Day" and "Next Phone Call". The album also showcased the band's versatility, with songs ranging from the hard-rocking "Gotta Get It Straight" and "Gonna Miss It When It's Gone" to the country-flavored "Fancy Ideas" and "I'm Free Today". The album also included another tribute to their Skynyrd roots, "Pine Box", which featured a slide guitar solo by Collins.

            -

This Is the Way received mixed reviews from critics, who praised the band's musicianship and vocals but criticized the album's lack of originality and edge. The album sold less well than its predecessor and failed to produce any hit singles. The band also faced internal problems, as Collins and Krantz (who had married Rossington) had a falling out over creative differences. The band decided to disband in 1982, after only two years of existence. Rossington and Krantz formed a new band, The Rossington Band, while Collins formed The Allen Collins Band. Both bands released one album each before dissolving.

            -

            -

            The Rossington Collins Band left behind a legacy of two albums that captured the spirit and sound of Southern rock in the early 1980s. The band's albums have been reissued on CD and are available for download from various online sources. The band's songs have also been covered by other artists, such as Molly Hatchet, Blackberry Smoke and Black Stone Cherry. The band's members have remained active in the music scene, with Rossington, Powell and Wilkeson rejoining Lynyrd Skynyrd in 1987. Collins died in 1990 from complications of a car accident that left him paralyzed in 1986. Wilkeson died in 2001 from chronic liver and lung disease. Powell died in 2009 from a heart attack. Rossington is the only surviving original member of Lynyrd Skynyrd still performing with the band.[^2^] [^3^]

            -
            -
            \ No newline at end of file diff --git a/spaces/nivalk/dermAI/README.md b/spaces/nivalk/dermAI/README.md deleted file mode 100644 index 159a7d47c487ae3e86cd6074cf1539d69b74cf78..0000000000000000000000000000000000000000 --- a/spaces/nivalk/dermAI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DermAI -emoji: 🏃 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nomic-ai/stanfordnlp_SHP/README.md b/spaces/nomic-ai/stanfordnlp_SHP/README.md deleted file mode 100644 index 0363a90c1ea4187413793f8e202b94f109e0eb55..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/stanfordnlp_SHP/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: stanfordnlp/SHP -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.h b/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.h deleted file mode 100644 index 747d3b4d4b9c2761f1a3f24f8bfa0da49a34ec19..0000000000000000000000000000000000000000 --- a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.h +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef THIRD_PARTY_LYRA_CODEC_SPARSE_MATMUL_LAYERS_ERRNO_MAPPING_H_ -#define THIRD_PARTY_LYRA_CODEC_SPARSE_MATMUL_LAYERS_ERRNO_MAPPING_H_ - -#include "absl/status/status.h" -#include "absl/strings/string_view.h" - -namespace csrblocksparse { - -// Converts |error_number| value to absl::Status. -absl::Status ErrnoToCanonicalStatus(int error_number, - absl::string_view message); - -} // namespace csrblocksparse - -#endif // THIRD_PARTY_LYRA_CODEC_SPARSE_MATMUL_LAYERS_ERRNO_MAPPING_H_ diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/after.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/after.py deleted file mode 100644 index 574c9bcea6e222ea8283a3c8dafbda15a2893fe1..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/after.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016 Julien Danjou -# Copyright 2016 Joshua Harlow -# Copyright 2013-2014 Ray Holder -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import typing - -from pip._vendor.tenacity import _utils - -if typing.TYPE_CHECKING: - import logging - - from pip._vendor.tenacity import RetryCallState - - -def after_nothing(retry_state: "RetryCallState") -> None: - """After call strategy that does nothing.""" - - -def after_log( - logger: "logging.Logger", - log_level: int, - sec_format: str = "%0.3f", -) -> typing.Callable[["RetryCallState"], None]: - """After call strategy that logs to some logger the finished attempt.""" - - def log_it(retry_state: "RetryCallState") -> None: - if retry_state.fn is None: - # NOTE(sileht): can't really happen, but we must please mypy - fn_name = "" - else: - fn_name = _utils.get_callback_name(retry_state.fn) - logger.log( - log_level, - f"Finished call to '{fn_name}' " - f"after {sec_format % retry_state.seconds_since_start}(s), " - f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.", - ) - - return log_it diff --git a/spaces/plzdontcry/dakubettergpt/src/components/ChatConfigMenu/index.ts b/spaces/plzdontcry/dakubettergpt/src/components/ChatConfigMenu/index.ts deleted file mode 100644 index 3c336f6bedfc0ebf627ca42b4591a66e1d412e62..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/ChatConfigMenu/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default } from './ChatConfigMenu'; diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_mono.c b/spaces/prerna9811/Chord/portaudio/test/patest_mono.c deleted file mode 100644 index e7d7d1b6797d0ebebb9ddf187af81485f630ed64..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/test/patest_mono.c +++ /dev/null @@ -1,155 +0,0 @@ -/** @file patest_mono.c - @ingroup test_src - @brief Play a monophonic sine wave using the Portable Audio api for several seconds. - @author Phil Burk http://www.softsynth.com -*/ -/* - * $Id$ - * - * Authors: - * Ross Bencina - * Phil Burk - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include "portaudio.h" - -#define NUM_SECONDS (10) -#define SAMPLE_RATE (44100) -#define AMPLITUDE (0.8) -#define FRAMES_PER_BUFFER (64) -#define OUTPUT_DEVICE Pa_GetDefaultOutputDevice() - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (200) -typedef struct -{ - float sine[TABLE_SIZE]; - int phase; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i; - int finished = 0; - /* avoid unused variable warnings */ - (void) inputBuffer; - (void) timeInfo; - (void) statusFlags; - for( i=0; isine[data->phase]; /* left */ - data->phase += 1; - if( data->phase >= TABLE_SIZE ) data->phase -= TABLE_SIZE; - } - return finished; -} - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaStreamParameters outputParameters; - PaStream *stream; - PaError err; - paTestData data; - int i; - printf("PortAudio Test: output MONO sine wave. 
SR = %d, BufSize = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER); - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); fflush(stdout); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py deleted file mode 100644 index c965c94ee50904e57f7bca86b3b602c00520a9cc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py +++ /dev/null @@ -1,11 +0,0 @@ -from fontTools import ttLib - -superclass = ttLib.getTableClass("hmtx") - - -class table__v_m_t_x(superclass): - - headerTag = "vhea" - advanceName = "height" - sideBearingName = "tsb" - numberOfMetricsName = "numberOfVMetrics" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py deleted file mode 100644 index 1bece8e5e4cfc52693e60b1414454cef5505fb8c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py +++ /dev/null @@ -1,1145 +0,0 @@ -from fontTools.config import Config -from fontTools.misc import xmlWriter -from fontTools.misc.configTools import AbstractConfig -from fontTools.misc.textTools import Tag, byteord, tostr -from fontTools.misc.loggingTools import deprecateArgument -from fontTools.ttLib import TTLibError -from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf -from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter -from io import BytesIO, StringIO, UnsupportedOperation -import os -import logging -import traceback - -log = logging.getLogger(__name__) - - -class TTFont(object): - - """Represents a TrueType font. - - The object manages file input and output, and offers a convenient way of - accessing tables. Tables will be only decompiled when necessary, ie. when - they're actually accessed. This means that simple operations can be extremely fast. - - Example usage:: - - >> from fontTools import ttLib - >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file - >> tt['maxp'].numGlyphs - 242 - >> tt['OS/2'].achVendID - 'B&H\000' - >> tt['head'].unitsPerEm - 2048 - - For details of the objects returned when accessing each table, see :ref:`tables`. 
- To add a table to the font, use the :py:func:`newTable` function:: - - >> os2 = newTable("OS/2") - >> os2.version = 4 - >> # set other attributes - >> font["OS/2"] = os2 - - TrueType fonts can also be serialized to and from XML format (see also the - :ref:`ttx` binary):: - - >> tt.saveXML("afont.ttx") - Dumping 'LTSH' table... - Dumping 'OS/2' table... - [...] - - >> tt2 = ttLib.TTFont() # Create a new font object - >> tt2.importXML("afont.ttx") - >> tt2['maxp'].numGlyphs - 242 - - The TTFont object may be used as a context manager; this will cause the file - reader to be closed after the context ``with`` block is exited:: - - with TTFont(filename) as f: - # Do stuff - - Args: - file: When reading a font from disk, either a pathname pointing to a file, - or a readable file object. - res_name_or_index: If running on a Macintosh, either a sfnt resource name or - an sfnt resource index number. If the index number is zero, TTLib will - autodetect whether the file is a flat file or a suitcase. (If it is a suitcase, - only the first 'sfnt' resource will be read.) - sfntVersion (str): When constructing a font object from scratch, sets the four-byte - sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create - an OpenType file, use ``OTTO``. - flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2 - file. - checkChecksums (int): How checksum data should be treated. Default is 0 - (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to - raise an exception if any wrong checksums are found. - recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``, - ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save. - Also compiles the glyphs on importing, which saves memory consumption and - time. - ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation - will be ignored, and the binary data will be returned for those tables instead. - recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in - the ``head`` table on save. - fontNumber (int): The index of the font in a TrueType Collection file. - lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon - access only. If it is set to False, many data structures are loaded immediately. - The default is ``lazy=None`` which is somewhere in between. - """ - - def __init__( - self, - file=None, - res_name_or_index=None, - sfntVersion="\000\001\000\000", - flavor=None, - checkChecksums=0, - verbose=None, - recalcBBoxes=True, - allowVID=NotImplemented, - ignoreDecompileErrors=False, - recalcTimestamp=True, - fontNumber=-1, - lazy=None, - quiet=None, - _tableCache=None, - cfg={}, - ): - for name in ("verbose", "quiet"): - val = locals().get(name) - if val is not None: - deprecateArgument(name, "configure logging instead") - setattr(self, name, val) - - self.lazy = lazy - self.recalcBBoxes = recalcBBoxes - self.recalcTimestamp = recalcTimestamp - self.tables = {} - self.reader = None - self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg) - self.ignoreDecompileErrors = ignoreDecompileErrors - - if not file: - self.sfntVersion = sfntVersion - self.flavor = flavor - self.flavorData = None - return - seekable = True - if not hasattr(file, "read"): - closeStream = True - # assume file is a string - if res_name_or_index is not None: - # see if it contains 'sfnt' resources in the resource or data fork - from . 
import macUtils - - if res_name_or_index == 0: - if macUtils.getSFNTResIndices(file): - # get the first available sfnt font. - file = macUtils.SFNTResourceReader(file, 1) - else: - file = open(file, "rb") - else: - file = macUtils.SFNTResourceReader(file, res_name_or_index) - else: - file = open(file, "rb") - else: - # assume "file" is a readable file object - closeStream = False - # SFNTReader wants the input file to be seekable. - # SpooledTemporaryFile has no seekable() on < 3.11, but still can seek: - # https://github.com/fonttools/fonttools/issues/3052 - if hasattr(file, "seekable"): - seekable = file.seekable() - elif hasattr(file, "seek"): - try: - file.seek(0) - except UnsupportedOperation: - seekable = False - - if not self.lazy: - # read input file in memory and wrap a stream around it to allow overwriting - if seekable: - file.seek(0) - tmp = BytesIO(file.read()) - if hasattr(file, "name"): - # save reference to input file name - tmp.name = file.name - if closeStream: - file.close() - file = tmp - elif not seekable: - raise TTLibError("Input file must be seekable when lazy=True") - self._tableCache = _tableCache - self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber) - self.sfntVersion = self.reader.sfntVersion - self.flavor = self.reader.flavor - self.flavorData = self.reader.flavorData - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def close(self): - """If we still have a reader object, close it.""" - if self.reader is not None: - self.reader.close() - - def save(self, file, reorderTables=True): - """Save the font to disk. - - Args: - file: Similarly to the constructor, can be either a pathname or a writable - file object. - reorderTables (Option[bool]): If true (the default), reorder the tables, - sorting them by tag (recommended by the OpenType specification). If - false, retain the original font order. If None, reorder by table - dependency (fastest). 
- """ - if not hasattr(file, "write"): - if self.lazy and self.reader.file.name == file: - raise TTLibError("Can't overwrite TTFont when 'lazy' attribute is True") - createStream = True - else: - # assume "file" is a writable file object - createStream = False - - tmp = BytesIO() - - writer_reordersTables = self._save(tmp) - - if not ( - reorderTables is None - or writer_reordersTables - or (reorderTables is False and self.reader is None) - ): - if reorderTables is False: - # sort tables using the original font's order - tableOrder = list(self.reader.keys()) - else: - # use the recommended order from the OpenType specification - tableOrder = None - tmp.flush() - tmp2 = BytesIO() - reorderFontTables(tmp, tmp2, tableOrder) - tmp.close() - tmp = tmp2 - - if createStream: - # "file" is a path - with open(file, "wb") as file: - file.write(tmp.getvalue()) - else: - file.write(tmp.getvalue()) - - tmp.close() - - def _save(self, file, tableCache=None): - """Internal function, to be shared by save() and TTCollection.save()""" - - if self.recalcTimestamp and "head" in self: - self[ - "head" - ] # make sure 'head' is loaded so the recalculation is actually done - - tags = list(self.keys()) - if "GlyphOrder" in tags: - tags.remove("GlyphOrder") - numTables = len(tags) - # write to a temporary stream to allow saving to unseekable streams - writer = SFNTWriter( - file, numTables, self.sfntVersion, self.flavor, self.flavorData - ) - - done = [] - for tag in tags: - self._writeTable(tag, writer, done, tableCache) - - writer.close() - - return writer.reordersTables() - - def saveXML(self, fileOrPath, newlinestr="\n", **kwargs): - """Export the font as TTX (an XML-based text file), or as a series of text - files when splitTables is true. In the latter case, the 'fileOrPath' - argument should be a path to a directory. - The 'tables' argument must either be false (dump all tables) or a - list of tables to dump. The 'skipTables' argument may be a list of tables - to skip, but only when the 'tables' argument is false. - """ - - writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr) - self._saveXML(writer, **kwargs) - writer.close() - - def _saveXML( - self, - writer, - writeVersion=True, - quiet=None, - tables=None, - skipTables=None, - splitTables=False, - splitGlyphs=False, - disassembleInstructions=True, - bitmapGlyphDataFormat="raw", - ): - - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - - self.disassembleInstructions = disassembleInstructions - self.bitmapGlyphDataFormat = bitmapGlyphDataFormat - if not tables: - tables = list(self.keys()) - if "GlyphOrder" not in tables: - tables = ["GlyphOrder"] + tables - if skipTables: - for tag in skipTables: - if tag in tables: - tables.remove(tag) - numTables = len(tables) - - if writeVersion: - from fontTools import version - - version = ".".join(version.split(".")[:2]) - writer.begintag( - "ttFont", - sfntVersion=repr(tostr(self.sfntVersion))[1:-1], - ttLibVersion=version, - ) - else: - writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1]) - writer.newline() - - # always splitTables if splitGlyphs is enabled - splitTables = splitTables or splitGlyphs - - if not splitTables: - writer.newline() - else: - path, ext = os.path.splitext(writer.filename) - - for i in range(numTables): - tag = tables[i] - if splitTables: - tablePath = path + "." 
+ tagToIdentifier(tag) + ext - tableWriter = xmlWriter.XMLWriter( - tablePath, newlinestr=writer.newlinestr - ) - tableWriter.begintag("ttFont", ttLibVersion=version) - tableWriter.newline() - tableWriter.newline() - writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) - writer.newline() - else: - tableWriter = writer - self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs) - if splitTables: - tableWriter.endtag("ttFont") - tableWriter.newline() - tableWriter.close() - writer.endtag("ttFont") - writer.newline() - - def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False): - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - if tag in self: - table = self[tag] - report = "Dumping '%s' table..." % tag - else: - report = "No '%s' table found." % tag - log.info(report) - if tag not in self: - return - xmlTag = tagToXML(tag) - attrs = dict() - if hasattr(table, "ERROR"): - attrs["ERROR"] = "decompilation error" - from .tables.DefaultTable import DefaultTable - - if table.__class__ == DefaultTable: - attrs["raw"] = True - writer.begintag(xmlTag, **attrs) - writer.newline() - if tag == "glyf": - table.toXML(writer, self, splitGlyphs=splitGlyphs) - else: - table.toXML(writer, self) - writer.endtag(xmlTag) - writer.newline() - writer.newline() - - def importXML(self, fileOrPath, quiet=None): - """Import a TTX file (an XML-based text format), so as to recreate - a font object. - """ - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - - if "maxp" in self and "post" in self: - # Make sure the glyph order is loaded, as it otherwise gets - # lost if the XML doesn't contain the glyph order, yet does - # contain the table which was originally used to extract the - # glyph names from (ie. 'post', 'cmap' or 'CFF '). - self.getGlyphOrder() - - from fontTools.misc import xmlReader - - reader = xmlReader.XMLReader(fileOrPath, self) - reader.read() - - def isLoaded(self, tag): - """Return true if the table identified by ``tag`` has been - decompiled and loaded into memory.""" - return tag in self.tables - - def has_key(self, tag): - """Test if the table identified by ``tag`` is present in the font. 
- - As well as this method, ``tag in font`` can also be used to determine the - presence of the table.""" - if self.isLoaded(tag): - return True - elif self.reader and tag in self.reader: - return True - elif tag == "GlyphOrder": - return True - else: - return False - - __contains__ = has_key - - def keys(self): - """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table.""" - keys = list(self.tables.keys()) - if self.reader: - for key in list(self.reader.keys()): - if key not in keys: - keys.append(key) - - if "GlyphOrder" in keys: - keys.remove("GlyphOrder") - keys = sortedTagList(keys) - return ["GlyphOrder"] + keys - - def ensureDecompiled(self, recurse=None): - """Decompile all the tables, even if a TTFont was opened in 'lazy' mode.""" - for tag in self.keys(): - table = self[tag] - if recurse is None: - recurse = self.lazy is not False - if recurse and hasattr(table, "ensureDecompiled"): - table.ensureDecompiled(recurse=recurse) - self.lazy = False - - def __len__(self): - return len(list(self.keys())) - - def __getitem__(self, tag): - tag = Tag(tag) - table = self.tables.get(tag) - if table is None: - if tag == "GlyphOrder": - table = GlyphOrder(tag) - self.tables[tag] = table - elif self.reader is not None: - table = self._readTable(tag) - else: - raise KeyError("'%s' table not found" % tag) - return table - - def _readTable(self, tag): - log.debug("Reading '%s' table from disk", tag) - data = self.reader[tag] - if self._tableCache is not None: - table = self._tableCache.get((tag, data)) - if table is not None: - return table - tableClass = getTableClass(tag) - table = tableClass(tag) - self.tables[tag] = table - log.debug("Decompiling '%s' table", tag) - try: - table.decompile(data, self) - except Exception: - if not self.ignoreDecompileErrors: - raise - # fall back to DefaultTable, retaining the binary table data - log.exception( - "An exception occurred during the decompilation of the '%s' table", tag - ) - from .tables.DefaultTable import DefaultTable - - file = StringIO() - traceback.print_exc(file=file) - table = DefaultTable(tag) - table.ERROR = file.getvalue() - self.tables[tag] = table - table.decompile(data, self) - if self._tableCache is not None: - self._tableCache[(tag, data)] = table - return table - - def __setitem__(self, tag, table): - self.tables[Tag(tag)] = table - - def __delitem__(self, tag): - if tag not in self: - raise KeyError("'%s' table not found" % tag) - if tag in self.tables: - del self.tables[tag] - if self.reader and tag in self.reader: - del self.reader[tag] - - def get(self, tag, default=None): - """Returns the table if it exists or (optionally) a default if it doesn't.""" - try: - return self[tag] - except KeyError: - return default - - def setGlyphOrder(self, glyphOrder): - """Set the glyph order - - Args: - glyphOrder ([str]): List of glyph names in order. - """ - self.glyphOrder = glyphOrder - if hasattr(self, "_reverseGlyphOrderDict"): - del self._reverseGlyphOrderDict - if self.isLoaded("glyf"): - self["glyf"].setGlyphOrder(glyphOrder) - - def getGlyphOrder(self): - """Returns a list of glyph names ordered by their position in the font.""" - try: - return self.glyphOrder - except AttributeError: - pass - if "CFF " in self: - cff = self["CFF "] - self.glyphOrder = cff.getGlyphOrder() - elif "post" in self: - # TrueType font - glyphOrder = self["post"].getGlyphOrder() - if glyphOrder is None: - # - # No names found in the 'post' table. 
- # Try to create glyph names from the unicode cmap (if available) - # in combination with the Adobe Glyph List (AGL). - # - self._getGlyphNamesFromCmap() - elif len(glyphOrder) < self["maxp"].numGlyphs: - # - # Not enough names found in the 'post' table. - # Can happen when 'post' format 1 is improperly used on a font that - # has more than 258 glyphs (the lenght of 'standardGlyphOrder'). - # - log.warning( - "Not enough names found in the 'post' table, generating them from cmap instead" - ) - self._getGlyphNamesFromCmap() - else: - self.glyphOrder = glyphOrder - else: - self._getGlyphNamesFromCmap() - return self.glyphOrder - - def _getGlyphNamesFromCmap(self): - # - # This is rather convoluted, but then again, it's an interesting problem: - # - we need to use the unicode values found in the cmap table to - # build glyph names (eg. because there is only a minimal post table, - # or none at all). - # - but the cmap parser also needs glyph names to work with... - # So here's what we do: - # - make up glyph names based on glyphID - # - load a temporary cmap table based on those names - # - extract the unicode values, build the "real" glyph names - # - unload the temporary cmap table - # - if self.isLoaded("cmap"): - # Bootstrapping: we're getting called by the cmap parser - # itself. This means self.tables['cmap'] contains a partially - # loaded cmap, making it impossible to get at a unicode - # subtable here. We remove the partially loaded cmap and - # restore it later. - # This only happens if the cmap table is loaded before any - # other table that does f.getGlyphOrder() or f.getGlyphName(). - cmapLoading = self.tables["cmap"] - del self.tables["cmap"] - else: - cmapLoading = None - # Make up glyph names based on glyphID, which will be used by the - # temporary cmap and by the real cmap in case we don't find a unicode - # cmap. - numGlyphs = int(self["maxp"].numGlyphs) - glyphOrder = [None] * numGlyphs - glyphOrder[0] = ".notdef" - for i in range(1, numGlyphs): - glyphOrder[i] = "glyph%.5d" % i - # Set the glyph order, so the cmap parser has something - # to work with (so we don't get called recursively). - self.glyphOrder = glyphOrder - - # Make up glyph names based on the reversed cmap table. Because some - # glyphs (eg. ligatures or alternates) may not be reachable via cmap, - # this naming table will usually not cover all glyphs in the font. - # If the font has no Unicode cmap table, reversecmap will be empty. - if "cmap" in self: - reversecmap = self["cmap"].buildReversed() - else: - reversecmap = {} - useCount = {} - for i in range(numGlyphs): - tempName = glyphOrder[i] - if tempName in reversecmap: - # If a font maps both U+0041 LATIN CAPITAL LETTER A and - # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph, - # we prefer naming the glyph as "A". - glyphName = self._makeGlyphName(min(reversecmap[tempName])) - numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1 - if numUses > 1: - glyphName = "%s.alt%d" % (glyphName, numUses - 1) - glyphOrder[i] = glyphName - - if "cmap" in self: - # Delete the temporary cmap table from the cache, so it can - # be parsed again with the right names. - del self.tables["cmap"] - self.glyphOrder = glyphOrder - if cmapLoading: - # restore partially loaded cmap, so it can continue loading - # using the proper names. 
- self.tables["cmap"] = cmapLoading - - @staticmethod - def _makeGlyphName(codepoint): - from fontTools import agl # Adobe Glyph List - - if codepoint in agl.UV2AGL: - return agl.UV2AGL[codepoint] - elif codepoint <= 0xFFFF: - return "uni%04X" % codepoint - else: - return "u%X" % codepoint - - def getGlyphNames(self): - """Get a list of glyph names, sorted alphabetically.""" - glyphNames = sorted(self.getGlyphOrder()) - return glyphNames - - def getGlyphNames2(self): - """Get a list of glyph names, sorted alphabetically, - but not case sensitive. - """ - from fontTools.misc import textTools - - return textTools.caselessSort(self.getGlyphOrder()) - - def getGlyphName(self, glyphID): - """Returns the name for the glyph with the given ID. - - If no name is available, synthesises one with the form ``glyphXXXXX``` where - ```XXXXX`` is the zero-padded glyph ID. - """ - try: - return self.getGlyphOrder()[glyphID] - except IndexError: - return "glyph%.5d" % glyphID - - def getGlyphNameMany(self, lst): - """Converts a list of glyph IDs into a list of glyph names.""" - glyphOrder = self.getGlyphOrder() - cnt = len(glyphOrder) - return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid for gid in lst] - - def getGlyphID(self, glyphName): - """Returns the ID of the glyph with the given name.""" - try: - return self.getReverseGlyphMap()[glyphName] - except KeyError: - if glyphName[:5] == "glyph": - try: - return int(glyphName[5:]) - except (NameError, ValueError): - raise KeyError(glyphName) - raise - - def getGlyphIDMany(self, lst): - """Converts a list of glyph names into a list of glyph IDs.""" - d = self.getReverseGlyphMap() - try: - return [d[glyphName] for glyphName in lst] - except KeyError: - getGlyphID = self.getGlyphID - return [getGlyphID(glyphName) for glyphName in lst] - - def getReverseGlyphMap(self, rebuild=False): - """Returns a mapping of glyph names to glyph IDs.""" - if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): - self._buildReverseGlyphOrderDict() - return self._reverseGlyphOrderDict - - def _buildReverseGlyphOrderDict(self): - self._reverseGlyphOrderDict = d = {} - for glyphID, glyphName in enumerate(self.getGlyphOrder()): - d[glyphName] = glyphID - return d - - def _writeTable(self, tag, writer, done, tableCache=None): - """Internal helper function for self.save(). Keeps track of - inter-table dependencies. - """ - if tag in done: - return - tableClass = getTableClass(tag) - for masterTable in tableClass.dependencies: - if masterTable not in done: - if masterTable in self: - self._writeTable(masterTable, writer, done, tableCache) - else: - done.append(masterTable) - done.append(tag) - tabledata = self.getTableData(tag) - if tableCache is not None: - entry = tableCache.get((Tag(tag), tabledata)) - if entry is not None: - log.debug("reusing '%s' table", tag) - writer.setEntry(tag, entry) - return - log.debug("Writing '%s' table to disk", tag) - writer[tag] = tabledata - if tableCache is not None: - tableCache[(Tag(tag), tabledata)] = writer[tag] - - def getTableData(self, tag): - """Returns the binary representation of a table. - - If the table is currently loaded and in memory, the data is compiled to - binary and returned; if it is not currently loaded, the binary data is - read from the font file and returned. 
- """ - tag = Tag(tag) - if self.isLoaded(tag): - log.debug("Compiling '%s' table", tag) - return self.tables[tag].compile(self) - elif self.reader and tag in self.reader: - log.debug("Reading '%s' table from disk", tag) - return self.reader[tag] - else: - raise KeyError(tag) - - def getGlyphSet(self, preferCFF=True, location=None, normalized=False): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. The returned glyph objects - have a ``.draw()`` method that supports the Pen protocol, and will - have an attribute named 'width'. - - If the font is CFF-based, the outlines will be taken from the ``CFF `` - or ``CFF2`` tables. Otherwise the outlines will be taken from the - ``glyf`` table. - - If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you - can use the ``preferCFF`` argument to specify which one should be taken. - If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is - taken. - - If the ``location`` parameter is set, it should be a dictionary mapping - four-letter variation tags to their float values, and the returned - glyph-set will represent an instance of a variable font at that - location. - - If the ``normalized`` variable is set to True, that location is - interpreted as in the normalized (-1..+1) space, otherwise it is in the - font's defined axes space. - """ - if location and "fvar" not in self: - location = None - if location and not normalized: - location = self.normalizeLocation(location) - if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self): - return _TTGlyphSetCFF(self, location) - elif "glyf" in self: - return _TTGlyphSetGlyf(self, location) - else: - raise TTLibError("Font contains no outlines") - - def normalizeLocation(self, location): - """Normalize a ``location`` from the font's defined axes space (also - known as user space) into the normalized (-1..+1) space. It applies - ``avar`` mapping if the font contains an ``avar`` table. - - The ``location`` parameter should be a dictionary mapping four-letter - variation tags to their float values. - - Raises ``TTLibError`` if the font is not a variable font. - """ - from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap - - if "fvar" not in self: - raise TTLibError("Not a variable font") - - axes = { - a.axisTag: (a.minValue, a.defaultValue, a.maxValue) - for a in self["fvar"].axes - } - location = normalizeLocation(location, axes) - if "avar" in self: - avar = self["avar"] - avarSegments = avar.segments - mappedLocation = {} - for axisTag, value in location.items(): - avarMapping = avarSegments.get(axisTag, None) - if avarMapping is not None: - value = piecewiseLinearMap(value, avarMapping) - mappedLocation[axisTag] = value - location = mappedLocation - return location - - def getBestCmap( - self, - cmapPreferences=( - (3, 10), - (0, 6), - (0, 4), - (3, 1), - (0, 3), - (0, 2), - (0, 1), - (0, 0), - ), - ): - """Returns the 'best' Unicode cmap dictionary available in the font - or ``None``, if no Unicode cmap subtable is available. 
- - By default it will search for the following (platformID, platEncID) - pairs in order:: - - (3, 10), # Windows Unicode full repertoire - (0, 6), # Unicode full repertoire (format 13 subtable) - (0, 4), # Unicode 2.0 full repertoire - (3, 1), # Windows Unicode BMP - (0, 3), # Unicode 2.0 BMP - (0, 2), # Unicode ISO/IEC 10646 - (0, 1), # Unicode 1.1 - (0, 0) # Unicode 1.0 - - This particular order matches what HarfBuzz uses to choose what - subtable to use by default. This order prefers the largest-repertoire - subtable, and among those, prefers the Windows-platform over the - Unicode-platform as the former has wider support. - - This order can be customized via the ``cmapPreferences`` argument. - """ - return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences) - - -class GlyphOrder(object): - - """A pseudo table. The glyph order isn't in the font as a separate - table, but it's nice to present it as such in the TTX format. - """ - - def __init__(self, tag=None): - pass - - def toXML(self, writer, ttFont): - glyphOrder = ttFont.getGlyphOrder() - writer.comment( - "The 'id' attribute is only for humans; " "it is ignored when parsed." - ) - writer.newline() - for i in range(len(glyphOrder)): - glyphName = glyphOrder[i] - writer.simpletag("GlyphID", id=i, name=glyphName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphOrder"): - self.glyphOrder = [] - if name == "GlyphID": - self.glyphOrder.append(attrs["name"]) - ttFont.setGlyphOrder(self.glyphOrder) - - -def getTableModule(tag): - """Fetch the packer/unpacker module for a table. - Return None when no module is found. - """ - from . import tables - - pyTag = tagToIdentifier(tag) - try: - __import__("fontTools.ttLib.tables." + pyTag) - except ImportError as err: - # If pyTag is found in the ImportError message, - # means table is not implemented. If it's not - # there, then some other module is missing, don't - # suppress the error. - if str(err).find(pyTag) >= 0: - return None - else: - raise err - else: - return getattr(tables, pyTag) - - -# Registry for custom table packer/unpacker classes. Keys are table -# tags, values are (moduleName, className) tuples. -# See registerCustomTableClass() and getCustomTableClass() -_customTableRegistry = {} - - -def registerCustomTableClass(tag, moduleName, className=None): - """Register a custom packer/unpacker class for a table. - - The 'moduleName' must be an importable module. If no 'className' - is given, it is derived from the tag, for example it will be - ``table_C_U_S_T_`` for a 'CUST' tag. - - The registered table class should be a subclass of - :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable` - """ - if className is None: - className = "table_" + tagToIdentifier(tag) - _customTableRegistry[tag] = (moduleName, className) - - -def unregisterCustomTableClass(tag): - """Unregister the custom packer/unpacker class for a table.""" - del _customTableRegistry[tag] - - -def getCustomTableClass(tag): - """Return the custom table class for tag, if one has been registered - with 'registerCustomTableClass()'. Else return None. 
- """ - if tag not in _customTableRegistry: - return None - import importlib - - moduleName, className = _customTableRegistry[tag] - module = importlib.import_module(moduleName) - return getattr(module, className) - - -def getTableClass(tag): - """Fetch the packer/unpacker class for a table.""" - tableClass = getCustomTableClass(tag) - if tableClass is not None: - return tableClass - module = getTableModule(tag) - if module is None: - from .tables.DefaultTable import DefaultTable - - return DefaultTable - pyTag = tagToIdentifier(tag) - tableClass = getattr(module, "table_" + pyTag) - return tableClass - - -def getClassTag(klass): - """Fetch the table tag for a class object.""" - name = klass.__name__ - assert name[:6] == "table_" - name = name[6:] # Chop 'table_' - return identifierToTag(name) - - -def newTable(tag): - """Return a new instance of a table.""" - tableClass = getTableClass(tag) - return tableClass(tag) - - -def _escapechar(c): - """Helper function for tagToIdentifier()""" - import re - - if re.match("[a-z0-9]", c): - return "_" + c - elif re.match("[A-Z]", c): - return c + "_" - else: - return hex(byteord(c))[2:] - - -def tagToIdentifier(tag): - """Convert a table tag to a valid (but UGLY) python identifier, - as well as a filename that's guaranteed to be unique even on a - caseless file system. Each character is mapped to two characters. - Lowercase letters get an underscore before the letter, uppercase - letters get an underscore after the letter. Trailing spaces are - trimmed. Illegal characters are escaped as two hex bytes. If the - result starts with a number (as the result of a hex escape), an - extra underscore is prepended. Examples:: - - >>> tagToIdentifier('glyf') - '_g_l_y_f' - >>> tagToIdentifier('cvt ') - '_c_v_t' - >>> tagToIdentifier('OS/2') - 'O_S_2f_2' - """ - import re - - tag = Tag(tag) - if tag == "GlyphOrder": - return tag - assert len(tag) == 4, "tag should be 4 characters long" - while len(tag) > 1 and tag[-1] == " ": - tag = tag[:-1] - ident = "" - for c in tag: - ident = ident + _escapechar(c) - if re.match("[0-9]", ident): - ident = "_" + ident - return ident - - -def identifierToTag(ident): - """the opposite of tagToIdentifier()""" - if ident == "GlyphOrder": - return ident - if len(ident) % 2 and ident[0] == "_": - ident = ident[1:] - assert not (len(ident) % 2) - tag = "" - for i in range(0, len(ident), 2): - if ident[i] == "_": - tag = tag + ident[i + 1] - elif ident[i + 1] == "_": - tag = tag + ident[i] - else: - # assume hex - tag = tag + chr(int(ident[i : i + 2], 16)) - # append trailing spaces - tag = tag + (4 - len(tag)) * " " - return Tag(tag) - - -def tagToXML(tag): - """Similarly to tagToIdentifier(), this converts a TT tag - to a valid XML element name. Since XML element names are - case sensitive, this is a fairly simple/readable translation. 
- """ - import re - - tag = Tag(tag) - if tag == "OS/2": - return "OS_2" - elif tag == "GlyphOrder": - return tag - if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): - return tag.strip() - else: - return tagToIdentifier(tag) - - -def xmlToTag(tag): - """The opposite of tagToXML()""" - if tag == "OS_2": - return Tag("OS/2") - if len(tag) == 8: - return identifierToTag(tag) - else: - return Tag(tag + " " * (4 - len(tag))) - - -# Table order as recommended in the OpenType specification 1.4 -TTFTableOrder = [ - "head", - "hhea", - "maxp", - "OS/2", - "hmtx", - "LTSH", - "VDMX", - "hdmx", - "cmap", - "fpgm", - "prep", - "cvt ", - "loca", - "glyf", - "kern", - "name", - "post", - "gasp", - "PCLT", -] - -OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", "CFF "] - - -def sortedTagList(tagList, tableOrder=None): - """Return a sorted copy of tagList, sorted according to the OpenType - specification, or according to a custom tableOrder. If given and not - None, tableOrder needs to be a list of tag names. - """ - tagList = sorted(tagList) - if tableOrder is None: - if "DSIG" in tagList: - # DSIG should be last (XXX spec reference?) - tagList.remove("DSIG") - tagList.append("DSIG") - if "CFF " in tagList: - tableOrder = OTFTableOrder - else: - tableOrder = TTFTableOrder - orderedTables = [] - for tag in tableOrder: - if tag in tagList: - orderedTables.append(tag) - tagList.remove(tag) - orderedTables.extend(tagList) - return orderedTables - - -def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): - """Rewrite a font file, ordering the tables as recommended by the - OpenType specification 1.4. - """ - inFile.seek(0) - outFile.seek(0) - reader = SFNTReader(inFile, checkChecksums=checkChecksums) - writer = SFNTWriter( - outFile, - len(reader.tables), - reader.sfntVersion, - reader.flavor, - reader.flavorData, - ) - tables = list(reader.keys()) - for tag in sortedTagList(tables, tableOrder): - writer[tag] = reader[tag] - writer.close() - - -def maxPowerOfTwo(x): - """Return the highest exponent of two, so that - (2 ** exponent) <= x. Return 0 if x is 0. - """ - exponent = 0 - while x: - x = x >> 1 - exponent = exponent + 1 - return max(exponent - 1, 0) - - -def getSearchRange(n, itemSize=16): - """Calculate searchRange, entrySelector, rangeShift.""" - # itemSize defaults to 16, for backward compatibility - # with upstream fonttools. - exponent = maxPowerOfTwo(n) - searchRange = (2**exponent) * itemSize - entrySelector = exponent - rangeShift = max(0, n * itemSize - searchRange) - return searchRange, entrySelector, rangeShift diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/__init__.py deleted file mode 100644 index 8ce118986065605a6c604d5e2f9a7183bc39c68e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/__init__.py +++ /dev/null @@ -1,126 +0,0 @@ -""" -Non-separable transforms that map from data space to screen space. - -Projections are defined as `~.axes.Axes` subclasses. They include the -following elements: - -- A transformation from data coordinates into display coordinates. - -- An inverse of that transformation. This is used, for example, to convert - mouse positions from screen space back into data space. - -- Transformations for the gridlines, ticks and ticklabels. 
Custom projections - will often need to place these elements in special locations, and Matplotlib - has a facility to help with doing so. - -- Setting up default values (overriding `~.axes.Axes.cla`), since the defaults - for a rectilinear axes may not be appropriate. - -- Defining the shape of the axes, for example, an elliptical axes, that will be - used to draw the background of the plot and for clipping any data elements. - -- Defining custom locators and formatters for the projection. For example, in - a geographic projection, it may be more convenient to display the grid in - degrees, even if the data is in radians. - -- Set up interactive panning and zooming. This is left as an "advanced" - feature left to the reader, but there is an example of this for polar plots - in `matplotlib.projections.polar`. - -- Any additional methods for additional convenience or features. - -Once the projection axes is defined, it can be used in one of two ways: - -- By defining the class attribute ``name``, the projection axes can be - registered with `matplotlib.projections.register_projection` and subsequently - simply invoked by name:: - - fig.add_subplot(projection="my_proj_name") - -- For more complex, parameterisable projections, a generic "projection" object - may be defined which includes the method ``_as_mpl_axes``. ``_as_mpl_axes`` - should take no arguments and return the projection's axes subclass and a - dictionary of additional arguments to pass to the subclass' ``__init__`` - method. Subsequently a parameterised projection can be initialised with:: - - fig.add_subplot(projection=MyProjection(param1=param1_value)) - - where MyProjection is an object which implements a ``_as_mpl_axes`` method. - -A full-fledged and heavily annotated example is in -:doc:`/gallery/misc/custom_projection`. The polar plot functionality in -`matplotlib.projections.polar` may also be of interest. -""" - -from .. import axes, _docstring -from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes -from .polar import PolarAxes - -try: - from mpl_toolkits.mplot3d import Axes3D -except ImportError: - import warnings - warnings.warn("Unable to import Axes3D. This may be due to multiple versions of " - "Matplotlib being installed (e.g. as a system package and as a pip " - "package). As a result, the 3D projection is not available.") - Axes3D = None - - -class ProjectionRegistry: - """A mapping of registered projection names to projection classes.""" - - def __init__(self): - self._all_projection_types = {} - - def register(self, *projections): - """Register a new set of projections.""" - for projection in projections: - name = projection.name - self._all_projection_types[name] = projection - - def get_projection_class(self, name): - """Get a projection class from its *name*.""" - return self._all_projection_types[name] - - def get_projection_names(self): - """Return the names of all projections currently registered.""" - return sorted(self._all_projection_types) - - -projection_registry = ProjectionRegistry() -projection_registry.register( - axes.Axes, - PolarAxes, - AitoffAxes, - HammerAxes, - LambertAxes, - MollweideAxes, -) -if Axes3D is not None: - projection_registry.register(Axes3D) -else: - # remove from namespace if not importable - del Axes3D - - -def register_projection(cls): - projection_registry.register(cls) - - -def get_projection_class(projection=None): - """ - Get a projection class from its name. - - If *projection* is None, a standard rectilinear projection is returned. 
- """ - if projection is None: - projection = 'rectilinear' - - try: - return projection_registry.get_projection_class(projection) - except KeyError as err: - raise ValueError("Unknown projection %r" % projection) from err - - -get_projection_names = projection_registry.get_projection_names -_docstring.interpd.update(projection_names=get_projection_names()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 deleted file mode 100644 index 02ac9dd993b39dbb69a233ed1f0d031f15f84639..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 +++ /dev/null @@ -1,23 +0,0 @@ -! Check that parameters are correct intercepted. -! Constants with comma separations are commonly -! used, for instance Pi = 3._dp -subroutine foo_single(x) - implicit none - integer, parameter :: rp = selected_real_kind(6) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - -subroutine foo_double(x) - implicit none - integer, parameter :: rp = selected_real_kind(15) - real(rp), intent(inout) :: x - dimension x(3) - real(rp), parameter :: three = 3._rp - x(1) = x(1) + x(2) + x(3) * three - return -end subroutine - diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py deleted file mode 100644 index 6ec44e6b06cdc443b1dd930d8f06fe70be577e97..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np - -dtype_obj = np.dtype(np.str_) -void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) - -np.dtype(dtype=np.int64) -np.dtype(int) -np.dtype("int") -np.dtype(None) - -np.dtype((int, 2)) -np.dtype((int, (1,))) - -np.dtype({"names": ["a", "b"], "formats": [int, float]}) -np.dtype({"names": ["a"], "formats": [int], "titles": [object]}) -np.dtype({"names": ["a"], "formats": [int], "titles": [object()]}) - -np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,)), ("age", "int32")]) - -np.dtype( - { - "names": ["a", "b"], - "formats": [int, float], - "itemsize": 9, - "aligned": False, - "titles": ["x", "y"], - "offsets": [0, 1], - } -) - -np.dtype((np.float_, float)) - - -class Test: - dtype = np.dtype(float) - - -np.dtype(Test()) - -# Methods and attributes -dtype_obj.base -dtype_obj.subdtype -dtype_obj.newbyteorder() -dtype_obj.type -dtype_obj.name -dtype_obj.names - -dtype_obj * 0 -dtype_obj * 2 - -0 * dtype_obj -2 * dtype_obj - -void_dtype_obj["f0"] -void_dtype_obj[0] -void_dtype_obj[["f0", "f1"]] -void_dtype_obj[["f0"]] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py deleted file mode 100644 index 956642697bf3285e5c661c43047a5f0dafa83144..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np - -import pandas 
as pd - - -def test_contains_nan(): - # GH#52840 - arr = pd.array(range(5)) / 0 - - assert np.isnan(arr._data[0]) - assert not arr.isna()[0] - assert np.nan in arr diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py deleted file mode 100644 index 0bdf9a0e5c007cdccbf8a2cb3cda1784b993c751..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo" -but are implicitly also testing nsmallest_foo. -""" -from string import ascii_lowercase - -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm -from pandas.util.version import Version - - -@pytest.fixture -def df_duplicates(): - return pd.DataFrame( - {"a": [1, 2, 3, 4, 4], "b": [1, 1, 1, 1, 1], "c": [0, 1, 2, 5, 4]}, - index=[0, 0, 1, 1, 1], - ) - - -@pytest.fixture -def df_strings(): - return pd.DataFrame( - { - "a": np.random.default_rng(2).permutation(10), - "b": list(ascii_lowercase[:10]), - "c": np.random.default_rng(2).permutation(10).astype("float64"), - } - ) - - -@pytest.fixture -def df_main_dtypes(): - return pd.DataFrame( - { - "group": [1, 1, 2], - "int": [1, 2, 3], - "float": [4.0, 5.0, 6.0], - "string": list("abc"), - "category_string": pd.Series(list("abc")).astype("category"), - "category_int": [7, 8, 9], - "datetime": pd.date_range("20130101", periods=3), - "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"), - }, - columns=[ - "group", - "int", - "float", - "string", - "category_string", - "category_int", - "datetime", - "datetimetz", - "timedelta", - ], - ) - - -class TestNLargestNSmallest: - # ---------------------------------------------------------------------- - # Top / bottom - @pytest.mark.parametrize( - "order", - [ - ["a"], - ["c"], - ["a", "b"], - ["a", "c"], - ["b", "a"], - ["b", "c"], - ["a", "b", "c"], - ["c", "a", "b"], - ["c", "b", "a"], - ["b", "c", "a"], - ["b", "a", "c"], - # dups! 
- ["b", "c", "c"], - ], - ) - @pytest.mark.parametrize("n", range(1, 11)) - def test_nlargest_n(self, df_strings, nselect_method, n, order): - # GH#10393 - df = df_strings - if "b" in order: - error_msg = ( - f"Column 'b' has dtype object, " - f"cannot use method '{nselect_method}' with this dtype" - ) - with pytest.raises(TypeError, match=error_msg): - getattr(df, nselect_method)(n, order) - else: - ascending = nselect_method == "nsmallest" - result = getattr(df, nselect_method)(n, order) - expected = df.sort_values(order, ascending=ascending).head(n) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "columns", [["group", "category_string"], ["group", "string"]] - ) - def test_nlargest_error(self, df_main_dtypes, nselect_method, columns): - df = df_main_dtypes - col = columns[1] - error_msg = ( - f"Column '{col}' has dtype {df[col].dtype}, " - f"cannot use method '{nselect_method}' with this dtype" - ) - # escape some characters that may be in the repr - error_msg = ( - error_msg.replace("(", "\\(") - .replace(")", "\\)") - .replace("[", "\\[") - .replace("]", "\\]") - ) - with pytest.raises(TypeError, match=error_msg): - getattr(df, nselect_method)(2, columns) - - def test_nlargest_all_dtypes(self, df_main_dtypes): - df = df_main_dtypes - df.nsmallest(2, list(set(df) - {"category_string", "string"})) - df.nlargest(2, list(set(df) - {"category_string", "string"})) - - def test_nlargest_duplicates_on_starter_columns(self): - # regression test for GH#22752 - - df = pd.DataFrame({"a": [2, 2, 2, 1, 1, 1], "b": [1, 2, 3, 3, 2, 1]}) - - result = df.nlargest(4, columns=["a", "b"]) - expected = pd.DataFrame( - {"a": [2, 2, 2, 1], "b": [3, 2, 1, 3]}, index=[2, 1, 0, 3] - ) - tm.assert_frame_equal(result, expected) - - result = df.nsmallest(4, columns=["a", "b"]) - expected = pd.DataFrame( - {"a": [1, 1, 1, 2], "b": [1, 2, 3, 1]}, index=[5, 4, 3, 0] - ) - tm.assert_frame_equal(result, expected) - - def test_nlargest_n_identical_values(self): - # GH#15297 - df = pd.DataFrame({"a": [1] * 5, "b": [1, 2, 3, 4, 5]}) - - result = df.nlargest(3, "a") - expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}, index=[0, 1, 2]) - tm.assert_frame_equal(result, expected) - - result = df.nsmallest(3, "a") - expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "order", - [["a", "b", "c"], ["c", "b", "a"], ["a"], ["b"], ["a", "b"], ["c", "b"]], - ) - @pytest.mark.parametrize("n", range(1, 6)) - def test_nlargest_n_duplicate_index(self, df_duplicates, n, order, request): - # GH#13412 - - df = df_duplicates - result = df.nsmallest(n, order) - expected = df.sort_values(order).head(n) - tm.assert_frame_equal(result, expected) - - result = df.nlargest(n, order) - expected = df.sort_values(order, ascending=False).head(n) - if Version(np.__version__) >= Version("1.25") and ( - (order == ["a"] and n in (1, 2, 3, 4)) or (order == ["a", "b"]) and n == 5 - ): - request.node.add_marker( - pytest.mark.xfail( - reason=( - "pandas default unstable sorting of duplicates" - "issue with numpy>=1.25 with AVX instructions" - ), - strict=False, - ) - ) - tm.assert_frame_equal(result, expected) - - def test_nlargest_duplicate_keep_all_ties(self): - # GH#16818 - df = pd.DataFrame( - {"a": [5, 4, 4, 2, 3, 3, 3, 3], "b": [10, 9, 8, 7, 5, 50, 10, 20]} - ) - result = df.nlargest(4, "a", keep="all") - expected = pd.DataFrame( - { - "a": {0: 5, 1: 4, 2: 4, 4: 3, 5: 3, 6: 3, 7: 3}, - "b": {0: 10, 1: 9, 2: 8, 4: 5, 5: 50, 6: 10, 7: 20}, - } - ) 
- tm.assert_frame_equal(result, expected) - - result = df.nsmallest(2, "a", keep="all") - expected = pd.DataFrame( - { - "a": {3: 2, 4: 3, 5: 3, 6: 3, 7: 3}, - "b": {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}, - } - ) - tm.assert_frame_equal(result, expected) - - def test_nlargest_multiindex_column_lookup(self): - # Check whether tuples are correctly treated as multi-level lookups. - # GH#23033 - df = pd.DataFrame( - columns=pd.MultiIndex.from_product([["x"], ["a", "b"]]), - data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]], - ) - - # nsmallest - result = df.nsmallest(3, ("x", "a")) - expected = df.iloc[[2, 0, 3]] - tm.assert_frame_equal(result, expected) - - # nlargest - result = df.nlargest(3, ("x", "b")) - expected = df.iloc[[3, 2, 1]] - tm.assert_frame_equal(result, expected) - - def test_nlargest_nan(self): - # GH#43060 - df = pd.DataFrame([np.nan, np.nan, 0, 1, 2, 3]) - result = df.nlargest(5, 0) - expected = df.sort_values(0, ascending=False).head(5) - tm.assert_frame_equal(result, expected) - - def test_nsmallest_nan_after_n_element(self): - # GH#46589 - df = pd.DataFrame( - { - "a": [1, 2, 3, 4, 5, None, 7], - "b": [7, 6, 5, 4, 3, 2, 1], - "c": [1, 1, 2, 2, 3, 3, 3], - }, - index=range(7), - ) - result = df.nsmallest(5, columns=["a", "b"]) - expected = pd.DataFrame( - { - "a": [1, 2, 3, 4, 5], - "b": [7, 6, 5, 4, 3], - "c": [1, 1, 2, 2, 3], - }, - index=range(5), - ).astype({"a": "float"}) - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py deleted file mode 100644 index 57783265b04b347ef31d998f4288856f08bee5ca..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py +++ /dev/null @@ -1,365 +0,0 @@ -from datetime import timedelta - -import numpy as np -import pytest - -from pandas.core.dtypes.common import is_integer - -from pandas import ( - DateOffset, - Interval, - IntervalIndex, - Timedelta, - Timestamp, - date_range, - interval_range, - timedelta_range, -) -import pandas._testing as tm - -from pandas.tseries.offsets import Day - - -@pytest.fixture(params=[None, "foo"]) -def name(request): - return request.param - - -class TestIntervalRange: - @pytest.mark.parametrize("freq, periods", [(1, 100), (2.5, 40), (5, 20), (25, 4)]) - def test_constructor_numeric(self, closed, name, freq, periods): - start, end = 0, 100 - breaks = np.arange(101, step=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - # defined from start/end/freq - result = interval_range( - start=start, end=end, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from start/periods/freq - result = interval_range( - start=start, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from end/periods/freq - result = interval_range( - end=end, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # GH 20976: linspace behavior defined from start/end/periods - result = interval_range( - start=start, end=end, periods=periods, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("tz", [None, "US/Eastern"]) - @pytest.mark.parametrize( - "freq, periods", [("D", 
364), ("2D", 182), ("22D18H", 16), ("M", 11)] - ) - def test_constructor_timestamp(self, closed, name, freq, periods, tz): - start, end = Timestamp("20180101", tz=tz), Timestamp("20181231", tz=tz) - breaks = date_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - # defined from start/end/freq - result = interval_range( - start=start, end=end, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from start/periods/freq - result = interval_range( - start=start, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from end/periods/freq - result = interval_range( - end=end, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # GH 20976: linspace behavior defined from start/end/periods - if not breaks.freq.is_anchored() and tz is None: - # matches expected only for non-anchored offsets and tz naive - # (anchored/DST transitions cause unequal spacing in expected) - result = interval_range( - start=start, end=end, periods=periods, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "freq, periods", [("D", 100), ("2D12H", 40), ("5D", 20), ("25D", 4)] - ) - def test_constructor_timedelta(self, closed, name, freq, periods): - start, end = Timedelta("0 days"), Timedelta("100 days") - breaks = timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - # defined from start/end/freq - result = interval_range( - start=start, end=end, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from start/periods/freq - result = interval_range( - start=start, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # defined from end/periods/freq - result = interval_range( - end=end, periods=periods, freq=freq, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - # GH 20976: linspace behavior defined from start/end/periods - result = interval_range( - start=start, end=end, periods=periods, name=name, closed=closed - ) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "start, end, freq, expected_endpoint", - [ - (0, 10, 3, 9), - (0, 10, 1.5, 9), - (0.5, 10, 3, 9.5), - (Timedelta("0D"), Timedelta("10D"), "2D4H", Timedelta("8D16H")), - ( - Timestamp("2018-01-01"), - Timestamp("2018-02-09"), - "MS", - Timestamp("2018-02-01"), - ), - ( - Timestamp("2018-01-01", tz="US/Eastern"), - Timestamp("2018-01-20", tz="US/Eastern"), - "5D12H", - Timestamp("2018-01-17 12:00:00", tz="US/Eastern"), - ), - ], - ) - def test_early_truncation(self, start, end, freq, expected_endpoint): - # index truncates early if freq causes end to be skipped - result = interval_range(start=start, end=end, freq=freq) - result_endpoint = result.right[-1] - assert result_endpoint == expected_endpoint - - @pytest.mark.parametrize( - "start, end, freq", - [(0.5, None, None), (None, 4.5, None), (0.5, None, 1.5), (None, 6.5, 1.5)], - ) - def test_no_invalid_float_truncation(self, start, end, freq): - # GH 21161 - if freq is None: - breaks = [0.5, 1.5, 2.5, 3.5, 4.5] - else: - breaks = [0.5, 2.0, 3.5, 5.0, 6.5] - expected = IntervalIndex.from_breaks(breaks) - - result = interval_range(start=start, end=end, periods=4, freq=freq) - tm.assert_index_equal(result, expected) - - 
@pytest.mark.parametrize( - "start, mid, end", - [ - ( - Timestamp("2018-03-10", tz="US/Eastern"), - Timestamp("2018-03-10 23:30:00", tz="US/Eastern"), - Timestamp("2018-03-12", tz="US/Eastern"), - ), - ( - Timestamp("2018-11-03", tz="US/Eastern"), - Timestamp("2018-11-04 00:30:00", tz="US/Eastern"), - Timestamp("2018-11-05", tz="US/Eastern"), - ), - ], - ) - def test_linspace_dst_transition(self, start, mid, end): - # GH 20976: linspace behavior defined from start/end/periods - # accounts for the hour gained/lost during DST transition - result = interval_range(start=start, end=end, periods=2) - expected = IntervalIndex.from_breaks([start, mid, end]) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("freq", [2, 2.0]) - @pytest.mark.parametrize("end", [10, 10.0]) - @pytest.mark.parametrize("start", [0, 0.0]) - def test_float_subtype(self, start, end, freq): - # Has float subtype if any of start/end/freq are float, even if all - # resulting endpoints can safely be upcast to integers - - # defined from start/end/freq - index = interval_range(start=start, end=end, freq=freq) - result = index.dtype.subtype - expected = "int64" if is_integer(start + end + freq) else "float64" - assert result == expected - - # defined from start/periods/freq - index = interval_range(start=start, periods=5, freq=freq) - result = index.dtype.subtype - expected = "int64" if is_integer(start + freq) else "float64" - assert result == expected - - # defined from end/periods/freq - index = interval_range(end=end, periods=5, freq=freq) - result = index.dtype.subtype - expected = "int64" if is_integer(end + freq) else "float64" - assert result == expected - - # GH 20976: linspace behavior defined from start/end/periods - index = interval_range(start=start, end=end, periods=5) - result = index.dtype.subtype - expected = "int64" if is_integer(start + end) else "float64" - assert result == expected - - def test_constructor_coverage(self): - # float value for periods - expected = interval_range(start=0, periods=10) - result = interval_range(start=0, periods=10.5) - tm.assert_index_equal(result, expected) - - # equivalent timestamp-like start/end - start, end = Timestamp("2017-01-01"), Timestamp("2017-01-15") - expected = interval_range(start=start, end=end) - - result = interval_range(start=start.to_pydatetime(), end=end.to_pydatetime()) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start.asm8, end=end.asm8) - tm.assert_index_equal(result, expected) - - # equivalent freq with timestamp - equiv_freq = [ - "D", - Day(), - Timedelta(days=1), - timedelta(days=1), - DateOffset(days=1), - ] - for freq in equiv_freq: - result = interval_range(start=start, end=end, freq=freq) - tm.assert_index_equal(result, expected) - - # equivalent timedelta-like start/end - start, end = Timedelta(days=1), Timedelta(days=10) - expected = interval_range(start=start, end=end) - - result = interval_range(start=start.to_pytimedelta(), end=end.to_pytimedelta()) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start.asm8, end=end.asm8) - tm.assert_index_equal(result, expected) - - # equivalent freq with timedelta - equiv_freq = ["D", Day(), Timedelta(days=1), timedelta(days=1)] - for freq in equiv_freq: - result = interval_range(start=start, end=end, freq=freq) - tm.assert_index_equal(result, expected) - - def test_errors(self): - # not enough params - msg = ( - "Of the four parameters: start, end, periods, and freq, " - "exactly three must be specified" - ) - - with 
pytest.raises(ValueError, match=msg): - interval_range(start=0) - - with pytest.raises(ValueError, match=msg): - interval_range(end=5) - - with pytest.raises(ValueError, match=msg): - interval_range(periods=2) - - with pytest.raises(ValueError, match=msg): - interval_range() - - # too many params - with pytest.raises(ValueError, match=msg): - interval_range(start=0, end=5, periods=6, freq=1.5) - - # mixed units - msg = "start, end, freq need to be type compatible" - with pytest.raises(TypeError, match=msg): - interval_range(start=0, end=Timestamp("20130101"), freq=2) - - with pytest.raises(TypeError, match=msg): - interval_range(start=0, end=Timedelta("1 day"), freq=2) - - with pytest.raises(TypeError, match=msg): - interval_range(start=0, end=10, freq="D") - - with pytest.raises(TypeError, match=msg): - interval_range(start=Timestamp("20130101"), end=10, freq="D") - - with pytest.raises(TypeError, match=msg): - interval_range( - start=Timestamp("20130101"), end=Timedelta("1 day"), freq="D" - ) - - with pytest.raises(TypeError, match=msg): - interval_range( - start=Timestamp("20130101"), end=Timestamp("20130110"), freq=2 - ) - - with pytest.raises(TypeError, match=msg): - interval_range(start=Timedelta("1 day"), end=10, freq="D") - - with pytest.raises(TypeError, match=msg): - interval_range( - start=Timedelta("1 day"), end=Timestamp("20130110"), freq="D" - ) - - with pytest.raises(TypeError, match=msg): - interval_range(start=Timedelta("1 day"), end=Timedelta("10 days"), freq=2) - - # invalid periods - msg = "periods must be a number, got foo" - with pytest.raises(TypeError, match=msg): - interval_range(start=0, periods="foo") - - # invalid start - msg = "start must be numeric or datetime-like, got foo" - with pytest.raises(ValueError, match=msg): - interval_range(start="foo", periods=10) - - # invalid end - msg = r"end must be numeric or datetime-like, got \(0, 1\]" - with pytest.raises(ValueError, match=msg): - interval_range(end=Interval(0, 1), periods=10) - - # invalid freq for datetime-like - msg = "freq must be numeric or convertible to DateOffset, got foo" - with pytest.raises(ValueError, match=msg): - interval_range(start=0, end=10, freq="foo") - - with pytest.raises(ValueError, match=msg): - interval_range(start=Timestamp("20130101"), periods=10, freq="foo") - - with pytest.raises(ValueError, match=msg): - interval_range(end=Timedelta("1 day"), periods=10, freq="foo") - - # mixed tz - start = Timestamp("2017-01-01", tz="US/Eastern") - end = Timestamp("2017-01-07", tz="US/Pacific") - msg = "Start and end cannot both be tz-aware with different timezones" - with pytest.raises(TypeError, match=msg): - interval_range(start=start, end=end) - - def test_float_freq(self): - # GH 54477 - result = interval_range(0, 1, freq=0.1) - expected = IntervalIndex.from_breaks([0 + 0.1 * n for n in range(11)]) - tm.assert_index_equal(result, expected) - - result = interval_range(0, 1, freq=0.6) - expected = IntervalIndex.from_breaks([0, 0.6]) - tm.assert_index_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_stata.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_stata.py deleted file mode 100644 index 7459aa1df8f3e3514720a56bb9935509b5a70e91..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/test_stata.py +++ /dev/null @@ -1,2324 +0,0 @@ -import bz2 -import datetime as dt -from datetime import datetime -import gzip 
-import io -import os -import struct -import tarfile -import zipfile - -import numpy as np -import pytest - -import pandas as pd -from pandas import CategoricalDtype -import pandas._testing as tm -from pandas.core.frame import ( - DataFrame, - Series, -) - -from pandas.io.parsers import read_csv -from pandas.io.stata import ( - CategoricalConversionWarning, - InvalidColumnName, - PossiblePrecisionLoss, - StataMissingValue, - StataReader, - StataWriter, - StataWriterUTF8, - ValueLabelTypeMismatch, - read_stata, -) - - -@pytest.fixture -def mixed_frame(): - return DataFrame( - { - "a": [1, 2, 3, 4], - "b": [1.0, 3.0, 27.0, 81.0], - "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"], - } - ) - - -@pytest.fixture -def parsed_114(datapath): - dta14_114 = datapath("io", "data", "stata", "stata5_114.dta") - parsed_114 = read_stata(dta14_114, convert_dates=True) - parsed_114.index.name = "index" - return parsed_114 - - -class TestStata: - def read_dta(self, file): - # Legacy default reader configuration - return read_stata(file, convert_dates=True) - - def read_csv(self, file): - return read_csv(file, parse_dates=True) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_read_empty_dta(self, version): - empty_ds = DataFrame(columns=["unit"]) - # GH 7369, make sure can read a 0-obs dta file - with tm.ensure_clean() as path: - empty_ds.to_stata(path, write_index=False, version=version) - empty_ds2 = read_stata(path) - tm.assert_frame_equal(empty_ds, empty_ds2) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_read_empty_dta_with_dtypes(self, version): - # GH 46240 - # Fixing above bug revealed that types are not correctly preserved when - # writing empty DataFrames - empty_df_typed = DataFrame( - { - "i8": np.array([0], dtype=np.int8), - "i16": np.array([0], dtype=np.int16), - "i32": np.array([0], dtype=np.int32), - "i64": np.array([0], dtype=np.int64), - "u8": np.array([0], dtype=np.uint8), - "u16": np.array([0], dtype=np.uint16), - "u32": np.array([0], dtype=np.uint32), - "u64": np.array([0], dtype=np.uint64), - "f32": np.array([0], dtype=np.float32), - "f64": np.array([0], dtype=np.float64), - } - ) - expected = empty_df_typed.copy() - # No uint# support. Downcast since values in range for int# - expected["u8"] = expected["u8"].astype(np.int8) - expected["u16"] = expected["u16"].astype(np.int16) - expected["u32"] = expected["u32"].astype(np.int32) - # No int64 supported at all. 
Downcast since values in range for int32 - expected["u64"] = expected["u64"].astype(np.int32) - expected["i64"] = expected["i64"].astype(np.int32) - - # GH 7369, make sure can read a 0-obs dta file - with tm.ensure_clean() as path: - empty_df_typed.to_stata(path, write_index=False, version=version) - empty_reread = read_stata(path) - tm.assert_frame_equal(expected, empty_reread) - tm.assert_series_equal(expected.dtypes, empty_reread.dtypes) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_read_index_col_none(self, version): - df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]}) - # GH 7369, make sure can read a 0-obs dta file - with tm.ensure_clean() as path: - df.to_stata(path, write_index=False, version=version) - read_df = read_stata(path) - - assert isinstance(read_df.index, pd.RangeIndex) - expected = df.copy() - expected["a"] = expected["a"].astype(np.int32) - tm.assert_frame_equal(read_df, expected, check_index_type=True) - - @pytest.mark.parametrize("file", ["stata1_114", "stata1_117"]) - def test_read_dta1(self, file, datapath): - file = datapath("io", "data", "stata", f"{file}.dta") - parsed = self.read_dta(file) - - # Pandas uses np.nan as missing value. - # Thus, all columns will be of type float, regardless of their name. - expected = DataFrame( - [(np.nan, np.nan, np.nan, np.nan, np.nan)], - columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], - ) - - # this is an oddity as really the nan should be float64, but - # the casting doesn't fail so need to match stata here - expected["float_miss"] = expected["float_miss"].astype(np.float32) - - tm.assert_frame_equal(parsed, expected) - - @pytest.mark.filterwarnings("always") - def test_read_dta2(self, datapath): - expected = DataFrame.from_records( - [ - ( - datetime(2006, 11, 19, 23, 13, 20), - 1479596223000, - datetime(2010, 1, 20), - datetime(2010, 1, 8), - datetime(2010, 1, 1), - datetime(1974, 7, 1), - datetime(2010, 1, 1), - datetime(2010, 1, 1), - ), - ( - datetime(1959, 12, 31, 20, 3, 20), - -1479590, - datetime(1953, 10, 2), - datetime(1948, 6, 10), - datetime(1955, 1, 1), - datetime(1955, 7, 1), - datetime(1955, 1, 1), - datetime(2, 1, 1), - ), - (pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT), - ], - columns=[ - "datetime_c", - "datetime_big_c", - "date", - "weekly_date", - "monthly_date", - "quarterly_date", - "half_yearly_date", - "yearly_date", - ], - ) - expected["yearly_date"] = expected["yearly_date"].astype("O") - - path1 = datapath("io", "data", "stata", "stata2_114.dta") - path2 = datapath("io", "data", "stata", "stata2_115.dta") - path3 = datapath("io", "data", "stata", "stata2_117.dta") - - with tm.assert_produces_warning(UserWarning): - parsed_114 = self.read_dta(path1) - with tm.assert_produces_warning(UserWarning): - parsed_115 = self.read_dta(path2) - with tm.assert_produces_warning(UserWarning): - parsed_117 = self.read_dta(path3) - # 113 is buggy due to limits of date format support in Stata - # parsed_113 = self.read_dta( - # datapath("io", "data", "stata", "stata2_113.dta") - # ) - - # buggy test because of the NaT comparison on certain platforms - # Format 113 test fails since it does not support tc and tC formats - # tm.assert_frame_equal(parsed_113, expected) - tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True) - tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True) - tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True) - - 
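A condensed sketch (editorial; the file name and frame are illustrative) of the date round trip that test_read_dta2 and the fixture files encode: to_stata's convert_dates maps a column to a Stata date format code ('tc' for millisecond datetimes, 'td' for days, 'tm' for months, and so on), and read_stata converts the stored numbers back to datetime64 when convert_dates=True, its default.

import pandas as pd
from datetime import datetime

df = pd.DataFrame({"when": [datetime(2006, 11, 19, 23, 13, 20)]})
# Store "when" with Stata's millisecond-resolution "tc" format.
df.to_stata("dates.dta", convert_dates={"when": "tc"}, write_index=False)
roundtripped = pd.read_stata("dates.dta")  # convert_dates defaults to True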
@pytest.mark.parametrize( - "file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"] - ) - def test_read_dta3(self, file, datapath): - file = datapath("io", "data", "stata", f"{file}.dta") - parsed = self.read_dta(file) - - # match stata here - expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) - expected = expected.astype(np.float32) - expected["year"] = expected["year"].astype(np.int16) - expected["quarter"] = expected["quarter"].astype(np.int8) - - tm.assert_frame_equal(parsed, expected) - - @pytest.mark.parametrize( - "file", ["stata4_113", "stata4_114", "stata4_115", "stata4_117"] - ) - def test_read_dta4(self, file, datapath): - file = datapath("io", "data", "stata", f"{file}.dta") - parsed = self.read_dta(file) - - expected = DataFrame.from_records( - [ - ["one", "ten", "one", "one", "one"], - ["two", "nine", "two", "two", "two"], - ["three", "eight", "three", "three", "three"], - ["four", "seven", 4, "four", "four"], - ["five", "six", 5, np.nan, "five"], - ["six", "five", 6, np.nan, "six"], - ["seven", "four", 7, np.nan, "seven"], - ["eight", "three", 8, np.nan, "eight"], - ["nine", "two", 9, np.nan, "nine"], - ["ten", "one", "ten", np.nan, "ten"], - ], - columns=[ - "fully_labeled", - "fully_labeled2", - "incompletely_labeled", - "labeled_with_missings", - "float_labelled", - ], - ) - - # these are all categoricals - for col in expected: - orig = expected[col].copy() - - categories = np.asarray(expected["fully_labeled"][orig.notna()]) - if col == "incompletely_labeled": - categories = orig - - cat = orig.astype("category")._values - cat = cat.set_categories(categories, ordered=True) - cat.categories.rename(None, inplace=True) - - expected[col] = cat - - # stata doesn't save .category metadata - tm.assert_frame_equal(parsed, expected) - - # File containing strls - def test_read_dta12(self, datapath): - parsed_117 = self.read_dta(datapath("io", "data", "stata", "stata12_117.dta")) - expected = DataFrame.from_records( - [ - [1, "abc", "abcdefghi"], - [3, "cba", "qwertywertyqwerty"], - [93, "", "strl"], - ], - columns=["x", "y", "z"], - ) - - tm.assert_frame_equal(parsed_117, expected, check_dtype=False) - - def test_read_dta18(self, datapath): - parsed_118 = self.read_dta(datapath("io", "data", "stata", "stata14_118.dta")) - parsed_118["Bytes"] = parsed_118["Bytes"].astype("O") - expected = DataFrame.from_records( - [ - ["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0], - ["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan], - ["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0], - ["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4], # noqa: RUF001 - ["", "", "", 0, 0.3332999, "option a", 1 / 3.0], - ], - columns=[ - "Things", - "Cities", - "Unicode_Cities_Strl", - "Ints", - "Floats", - "Bytes", - "Longs", - ], - ) - expected["Floats"] = expected["Floats"].astype(np.float32) - for col in parsed_118.columns: - tm.assert_almost_equal(parsed_118[col], expected[col]) - - with StataReader(datapath("io", "data", "stata", "stata14_118.dta")) as rdr: - vl = rdr.variable_labels() - vl_expected = { - "Unicode_Cities_Strl": "Here are some strls with Ünicode chars", - "Longs": "long data", - "Things": "Here are some things", - "Bytes": "byte data", - "Ints": "int data", - "Cities": "Here are some cities", - "Floats": "float data", - } - tm.assert_dict_equal(vl, vl_expected) - - assert rdr.data_label == "This is a Ünicode data label" - - def test_read_write_dta5(self): - original = DataFrame( - [(np.nan, np.nan, np.nan, np.nan, np.nan)], - 
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], - ) - original.index.name = "index" - - with tm.ensure_clean() as path: - original.to_stata(path, convert_dates=None) - written_and_read_again = self.read_dta(path) - - expected = original.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - def test_write_dta6(self, datapath): - original = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) - original.index.name = "index" - original.index = original.index.astype(np.int32) - original["year"] = original["year"].astype(np.int32) - original["quarter"] = original["quarter"].astype(np.int32) - - with tm.ensure_clean() as path: - original.to_stata(path, convert_dates=None) - written_and_read_again = self.read_dta(path) - tm.assert_frame_equal( - written_and_read_again.set_index("index"), - original, - check_index_type=False, - ) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_read_write_dta10(self, version): - original = DataFrame( - data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]], - columns=["string", "object", "integer", "floating", "datetime"], - ) - original["object"] = Series(original["object"], dtype=object) - original.index.name = "index" - original.index = original.index.astype(np.int32) - original["integer"] = original["integer"].astype(np.int32) - - with tm.ensure_clean() as path: - original.to_stata(path, convert_dates={"datetime": "tc"}, version=version) - written_and_read_again = self.read_dta(path) - # original.index is np.int32, read index is np.int64 - tm.assert_frame_equal( - written_and_read_again.set_index("index"), - original, - check_index_type=False, - ) - - def test_stata_doc_examples(self): - with tm.ensure_clean() as path: - df = DataFrame( - np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") - ) - df.to_stata(path) - - def test_write_preserves_original(self): - # 9795 - - df = DataFrame( - np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") - ) - df.loc[2, "a":"c"] = np.nan - df_copy = df.copy() - with tm.ensure_clean() as path: - df.to_stata(path, write_index=False) - tm.assert_frame_equal(df, df_copy) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_encoding(self, version, datapath): - # GH 4626, proper encoding handling - raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) - encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) - result = encoded.kreis1849[0] - - expected = raw.kreis1849[0] - assert result == expected - assert isinstance(result, str) - - with tm.ensure_clean() as path: - encoded.to_stata(path, write_index=False, version=version) - reread_encoded = read_stata(path) - tm.assert_frame_equal(encoded, reread_encoded) - - def test_read_write_dta11(self): - original = DataFrame( - [(1, 2, 3, 4)], - columns=[ - "good", - "b\u00E4d", - "8number", - "astringwithmorethan32characters______", - ], - ) - formatted = DataFrame( - [(1, 2, 3, 4)], - columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"], - ) - formatted.index.name = "index" - formatted = formatted.astype(np.int32) - - with tm.ensure_clean() as path: - with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path, convert_dates=None) - - written_and_read_again = self.read_dta(path) - - expected = formatted.copy() - expected.index = expected.index.astype(np.int32) - 
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_read_write_dta12(self, version): - original = DataFrame( - [(1, 2, 3, 4, 5, 6)], - columns=[ - "astringwithmorethan32characters_1", - "astringwithmorethan32characters_2", - "+", - "-", - "short", - "delete", - ], - ) - formatted = DataFrame( - [(1, 2, 3, 4, 5, 6)], - columns=[ - "astringwithmorethan32characters_", - "_0astringwithmorethan32character", - "_", - "_1_", - "_short", - "_delete", - ], - ) - formatted.index.name = "index" - formatted = formatted.astype(np.int32) - - with tm.ensure_clean() as path: - with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path, convert_dates=None, version=version) - # should get a warning for that format. - - written_and_read_again = self.read_dta(path) - - expected = formatted.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - def test_read_write_dta13(self): - s1 = Series(2**9, dtype=np.int16) - s2 = Series(2**17, dtype=np.int32) - s3 = Series(2**33, dtype=np.int64) - original = DataFrame({"int16": s1, "int32": s2, "int64": s3}) - original.index.name = "index" - - formatted = original - formatted["int64"] = formatted["int64"].astype(np.float64) - - with tm.ensure_clean() as path: - original.to_stata(path) - written_and_read_again = self.read_dta(path) - - expected = formatted.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - @pytest.mark.parametrize( - "file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"] - ) - def test_read_write_reread_dta14(self, file, parsed_114, version, datapath): - file = datapath("io", "data", "stata", f"{file}.dta") - parsed = self.read_dta(file) - parsed.index.name = "index" - - tm.assert_frame_equal(parsed_114, parsed) - - with tm.ensure_clean() as path: - parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version) - written_and_read_again = self.read_dta(path) - - expected = parsed_114.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - @pytest.mark.parametrize( - "file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"] - ) - def test_read_write_reread_dta15(self, file, datapath): - expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv")) - expected["byte_"] = expected["byte_"].astype(np.int8) - expected["int_"] = expected["int_"].astype(np.int16) - expected["long_"] = expected["long_"].astype(np.int32) - expected["float_"] = expected["float_"].astype(np.float32) - expected["double_"] = expected["double_"].astype(np.float64) - expected["date_td"] = expected["date_td"].apply( - datetime.strptime, args=("%Y-%m-%d",) - ) - - file = datapath("io", "data", "stata", f"{file}.dta") - parsed = self.read_dta(file) - - tm.assert_frame_equal(expected, parsed) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_timestamp_and_label(self, version): - original = DataFrame([(1,)], columns=["variable"]) - time_stamp = datetime(2000, 2, 29, 14, 21) - data_label = "This is a data file." 
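# Aside (editorial, not in the original test): time_stamp must be a datetime
# instance (a string raises ValueError, as test_invalid_timestamp below
# checks); the writer stores it in Stata's "DD Mon YYYY HH:MM" text form,
# matching the reader.time_stamp assertion that follows.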
- with tm.ensure_clean() as path: - original.to_stata( - path, time_stamp=time_stamp, data_label=data_label, version=version - ) - - with StataReader(path) as reader: - assert reader.time_stamp == "29 Feb 2000 14:21" - assert reader.data_label == data_label - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_invalid_timestamp(self, version): - original = DataFrame([(1,)], columns=["variable"]) - time_stamp = "01 Jan 2000, 00:00:00" - with tm.ensure_clean() as path: - msg = "time_stamp should be datetime type" - with pytest.raises(ValueError, match=msg): - original.to_stata(path, time_stamp=time_stamp, version=version) - assert not os.path.isfile(path) - - def test_numeric_column_names(self): - original = DataFrame(np.reshape(np.arange(25.0), (5, 5))) - original.index.name = "index" - with tm.ensure_clean() as path: - # should get a warning for that format. - with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path) - - written_and_read_again = self.read_dta(path) - - written_and_read_again = written_and_read_again.set_index("index") - columns = list(written_and_read_again.columns) - convert_col_name = lambda x: int(x[1]) - written_and_read_again.columns = map(convert_col_name, columns) - - expected = original.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(expected, written_and_read_again) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_nan_to_missing_value(self, version): - s1 = Series(np.arange(4.0), dtype=np.float32) - s2 = Series(np.arange(4.0), dtype=np.float64) - s1[::2] = np.nan - s2[1::2] = np.nan - original = DataFrame({"s1": s1, "s2": s2}) - original.index.name = "index" - - with tm.ensure_clean() as path: - original.to_stata(path, version=version) - written_and_read_again = self.read_dta(path) - - written_and_read_again = written_and_read_again.set_index("index") - expected = original.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again, expected) - - def test_no_index(self): - columns = ["x", "y"] - original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns) - original.index.name = "index_not_written" - with tm.ensure_clean() as path: - original.to_stata(path, write_index=False) - written_and_read_again = self.read_dta(path) - with pytest.raises(KeyError, match=original.index.name): - written_and_read_again["index_not_written"] - - def test_string_no_dates(self): - s1 = Series(["a", "A longer string"]) - s2 = Series([1.0, 2.0], dtype=np.float64) - original = DataFrame({"s1": s1, "s2": s2}) - original.index.name = "index" - with tm.ensure_clean() as path: - original.to_stata(path) - written_and_read_again = self.read_dta(path) - - expected = original.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - def test_large_value_conversion(self): - s0 = Series([1, 99], dtype=np.int8) - s1 = Series([1, 127], dtype=np.int8) - s2 = Series([1, 2**15 - 1], dtype=np.int16) - s3 = Series([1, 2**63 - 1], dtype=np.int64) - original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3}) - original.index.name = "index" - with tm.ensure_clean() as path: - with tm.assert_produces_warning(PossiblePrecisionLoss): - original.to_stata(path) - - written_and_read_again = self.read_dta(path) - - modified = original.copy() - modified["s1"] = Series(modified["s1"], dtype=np.int16) - modified["s2"] = Series(modified["s2"], dtype=np.int32) - 
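# Aside (editorial, not in the original test): the .dta format has no 64-bit
# integer type, so the int64 column is stored as float64, which is why the
# write above is expected to warn with PossiblePrecisionLoss.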
modified["s3"] = Series(modified["s3"], dtype=np.float64) - modified.index = original.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) - - def test_dates_invalid_column(self): - original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) - original.index.name = "index" - with tm.ensure_clean() as path: - with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path, convert_dates={0: "tc"}) - - written_and_read_again = self.read_dta(path) - - modified = original.copy() - modified.columns = ["_0"] - modified.index = original.index.astype(np.int32) - tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) - - def test_105(self, datapath): - # Data obtained from: - # http://go.worldbank.org/ZXY29PVJ21 - dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") - df = read_stata(dpath) - df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]] - df0 = DataFrame(df0) - df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"] - df0["clustnum"] = df0["clustnum"].astype(np.int16) - df0["pri_schl"] = df0["pri_schl"].astype(np.int8) - df0["psch_num"] = df0["psch_num"].astype(np.int8) - df0["psch_dis"] = df0["psch_dis"].astype(np.float32) - tm.assert_frame_equal(df.head(3), df0) - - def test_value_labels_old_format(self, datapath): - # GH 19417 - # - # Test that value_labels() returns an empty dict if the file format - # predates supporting value labels. - dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") - with StataReader(dpath) as reader: - assert reader.value_labels() == {} - - def test_date_export_formats(self): - columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"] - conversions = {c: c for c in columns} - data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns) - original = DataFrame([data], columns=columns) - original.index.name = "index" - expected_values = [ - datetime(2006, 11, 20, 23, 13, 20), # Time - datetime(2006, 11, 20), # Day - datetime(2006, 11, 19), # Week - datetime(2006, 11, 1), # Month - datetime(2006, 10, 1), # Quarter year - datetime(2006, 7, 1), # Half year - datetime(2006, 1, 1), - ] # Year - - expected = DataFrame( - [expected_values], - index=pd.Index([0], dtype=np.int32, name="index"), - columns=columns, - ) - - with tm.ensure_clean() as path: - original.to_stata(path, convert_dates=conversions) - written_and_read_again = self.read_dta(path) - - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - def test_write_missing_strings(self): - original = DataFrame([["1"], [None]], columns=["foo"]) - - expected = DataFrame( - [["1"], [""]], - index=pd.Index([0, 1], dtype=np.int32, name="index"), - columns=["foo"], - ) - - with tm.ensure_clean() as path: - original.to_stata(path) - written_and_read_again = self.read_dta(path) - - tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - @pytest.mark.parametrize("byteorder", [">", "<"]) - def test_bool_uint(self, byteorder, version): - s0 = Series([0, 1, True], dtype=np.bool_) - s1 = Series([0, 1, 100], dtype=np.uint8) - s2 = Series([0, 1, 255], dtype=np.uint8) - s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16) - s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16) - s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32) - s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32) - - original = DataFrame( - {"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6} - ) - original.index.name = "index" - expected = original.copy() - 
expected.index = original.index.astype(np.int32) - expected_types = ( - np.int8, - np.int8, - np.int16, - np.int16, - np.int32, - np.int32, - np.float64, - ) - for c, t in zip(expected.columns, expected_types): - expected[c] = expected[c].astype(t) - - with tm.ensure_clean() as path: - original.to_stata(path, byteorder=byteorder, version=version) - written_and_read_again = self.read_dta(path) - - written_and_read_again = written_and_read_again.set_index("index") - tm.assert_frame_equal(written_and_read_again, expected) - - def test_variable_labels(self, datapath): - with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr: - sr_115 = rdr.variable_labels() - with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr: - sr_117 = rdr.variable_labels() - keys = ("var1", "var2", "var3") - labels = ("label1", "label2", "label3") - for k, v in sr_115.items(): - assert k in sr_117 - assert v == sr_117[k] - assert k in keys - assert v in labels - - def test_minimal_size_col(self): - str_lens = (1, 100, 244) - s = {} - for str_len in str_lens: - s["s" + str(str_len)] = Series( - ["a" * str_len, "b" * str_len, "c" * str_len] - ) - original = DataFrame(s) - with tm.ensure_clean() as path: - original.to_stata(path, write_index=False) - - with StataReader(path) as sr: - sr._ensure_open() # The `_*list` variables are initialized here - for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist): - assert int(variable[1:]) == int(fmt[1:-1]) - assert int(variable[1:]) == typ - - def test_excessively_long_string(self): - str_lens = (1, 244, 500) - s = {} - for str_len in str_lens: - s["s" + str(str_len)] = Series( - ["a" * str_len, "b" * str_len, "c" * str_len] - ) - original = DataFrame(s) - msg = ( - r"Fixed width strings in Stata \.dta files are limited to 244 " - r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy " - r"this restriction\. Use the\n'version=117' parameter to write " - r"the newer \(Stata 13 and later\) format\." - ) - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean() as path: - original.to_stata(path) - - def test_missing_value_generator(self): - types = ("b", "h", "l") - df = DataFrame([[0.0]], columns=["float_"]) - with tm.ensure_clean() as path: - df.to_stata(path) - with StataReader(path) as rdr: - valid_range = rdr.VALID_RANGE - expected_values = ["." + chr(97 + i) for i in range(26)] - expected_values.insert(0, ".") - for t in types: - offset = valid_range[t][1] - for i in range(0, 27): - val = StataMissingValue(offset + 1 + i) - assert val.string == expected_values[i] - - # Test extremes for floats - val = StataMissingValue(struct.unpack(" DataFrame: - """ - Emulate the categorical casting behavior we expect from roundtripping. 
- """ - for col in from_frame: - ser = from_frame[col] - if isinstance(ser.dtype, CategoricalDtype): - cat = ser._values.remove_unused_categories() - if cat.categories.dtype == object: - categories = pd.Index._with_infer(cat.categories._values) - cat = cat.set_categories(categories) - from_frame[col] = cat - return from_frame - - def test_iterator(self, datapath): - fname = datapath("io", "data", "stata", "stata3_117.dta") - - parsed = read_stata(fname) - - with read_stata(fname, iterator=True) as itr: - chunk = itr.read(5) - tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) - - with read_stata(fname, chunksize=5) as itr: - chunk = list(itr) - tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0]) - - with read_stata(fname, iterator=True) as itr: - chunk = itr.get_chunk(5) - tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) - - with read_stata(fname, chunksize=5) as itr: - chunk = itr.get_chunk() - tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) - - # GH12153 - with read_stata(fname, chunksize=4) as itr: - from_chunks = pd.concat(itr) - tm.assert_frame_equal(parsed, from_chunks) - - @pytest.mark.filterwarnings("ignore::UserWarning") - @pytest.mark.parametrize( - "file", - [ - "stata2_115", - "stata3_115", - "stata4_115", - "stata5_115", - "stata6_115", - "stata7_115", - "stata8_115", - "stata9_115", - "stata10_115", - "stata11_115", - ], - ) - @pytest.mark.parametrize("chunksize", [1, 2]) - @pytest.mark.parametrize("convert_categoricals", [False, True]) - @pytest.mark.parametrize("convert_dates", [False, True]) - def test_read_chunks_115( - self, file, chunksize, convert_categoricals, convert_dates, datapath - ): - fname = datapath("io", "data", "stata", f"{file}.dta") - - # Read the whole file - parsed = read_stata( - fname, - convert_categoricals=convert_categoricals, - convert_dates=convert_dates, - ) - - # Compare to what we get when reading by chunk - with read_stata( - fname, - iterator=True, - convert_dates=convert_dates, - convert_categoricals=convert_categoricals, - ) as itr: - pos = 0 - for j in range(5): - try: - chunk = itr.read(chunksize) - except StopIteration: - break - from_frame = parsed.iloc[pos : pos + chunksize, :].copy() - from_frame = self._convert_categorical(from_frame) - tm.assert_frame_equal( - from_frame, chunk, check_dtype=False, check_datetimelike_compat=True - ) - pos += chunksize - - def test_read_chunks_columns(self, datapath): - fname = datapath("io", "data", "stata", "stata3_117.dta") - columns = ["quarter", "cpi", "m1"] - chunksize = 2 - - parsed = read_stata(fname, columns=columns) - with read_stata(fname, iterator=True) as itr: - pos = 0 - for j in range(5): - chunk = itr.read(chunksize, columns=columns) - if chunk is None: - break - from_frame = parsed.iloc[pos : pos + chunksize, :] - tm.assert_frame_equal(from_frame, chunk, check_dtype=False) - pos += chunksize - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_write_variable_labels(self, version, mixed_frame): - # GH 13631, add support for writing variable labels - mixed_frame.index.name = "index" - variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"} - with tm.ensure_clean() as path: - mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) - with StataReader(path) as sr: - read_labels = sr.variable_labels() - expected_labels = { - "index": "", - "a": "City Rank", - "b": "City Exponent", - "c": "City", - } - assert read_labels == expected_labels - - variable_labels["index"] = "The Index" - with tm.ensure_clean() as path: - 
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) - with StataReader(path) as sr: - read_labels = sr.variable_labels() - assert read_labels == variable_labels - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_invalid_variable_labels(self, version, mixed_frame): - mixed_frame.index.name = "index" - variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} - with tm.ensure_clean() as path: - msg = "Variable labels must be 80 characters or fewer" - with pytest.raises(ValueError, match=msg): - mixed_frame.to_stata( - path, variable_labels=variable_labels, version=version - ) - - @pytest.mark.parametrize("version", [114, 117]) - def test_invalid_variable_label_encoding(self, version, mixed_frame): - mixed_frame.index.name = "index" - variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} - variable_labels["a"] = "invalid character Œ" - with tm.ensure_clean() as path: - with pytest.raises( - ValueError, match="Variable labels must contain only characters" - ): - mixed_frame.to_stata( - path, variable_labels=variable_labels, version=version - ) - - def test_write_variable_label_errors(self, mixed_frame): - values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"] - - variable_labels_utf8 = { - "a": "City Rank", - "b": "City Exponent", - "c": "".join(values), - } - - msg = ( - "Variable labels must contain only characters that can be " - "encoded in Latin-1" - ) - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean() as path: - mixed_frame.to_stata(path, variable_labels=variable_labels_utf8) - - variable_labels_long = { - "a": "City Rank", - "b": "City Exponent", - "c": "A very, very, very long variable label " - "that is too long for Stata which means " - "that it has more than 80 characters", - } - - msg = "Variable labels must be 80 characters or fewer" - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean() as path: - mixed_frame.to_stata(path, variable_labels=variable_labels_long) - - def test_default_date_conversion(self): - # GH 12259 - dates = [ - dt.datetime(1999, 12, 31, 12, 12, 12, 12000), - dt.datetime(2012, 12, 21, 12, 21, 12, 21000), - dt.datetime(1776, 7, 4, 7, 4, 7, 4000), - ] - original = DataFrame( - { - "nums": [1.0, 2.0, 3.0], - "strs": ["apple", "banana", "cherry"], - "dates": dates, - } - ) - - with tm.ensure_clean() as path: - original.to_stata(path, write_index=False) - reread = read_stata(path, convert_dates=True) - tm.assert_frame_equal(original, reread) - - original.to_stata(path, write_index=False, convert_dates={"dates": "tc"}) - direct = read_stata(path, convert_dates=True) - tm.assert_frame_equal(reread, direct) - - dates_idx = original.columns.tolist().index("dates") - original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"}) - direct = read_stata(path, convert_dates=True) - tm.assert_frame_equal(reread, direct) - - def test_unsupported_type(self): - original = DataFrame({"a": [1 + 2j, 2 + 4j]}) - - msg = "Data type complex128 not supported" - with pytest.raises(NotImplementedError, match=msg): - with tm.ensure_clean() as path: - original.to_stata(path) - - def test_unsupported_datetype(self): - dates = [ - dt.datetime(1999, 12, 31, 12, 12, 12, 12000), - dt.datetime(2012, 12, 21, 12, 21, 12, 21000), - dt.datetime(1776, 7, 4, 7, 4, 7, 4000), - ] - original = DataFrame( - { - "nums": [1.0, 2.0, 3.0], - "strs": ["apple", "banana", "cherry"], - "dates": dates, - } - ) - - msg = "Format %tC not implemented" - with 
pytest.raises(NotImplementedError, match=msg): - with tm.ensure_clean() as path: - original.to_stata(path, convert_dates={"dates": "tC"}) - - dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong") - original = DataFrame( - { - "nums": [1.0, 2.0, 3.0], - "strs": ["apple", "banana", "cherry"], - "dates": dates, - } - ) - with pytest.raises(NotImplementedError, match="Data type datetime64"): - with tm.ensure_clean() as path: - original.to_stata(path) - - def test_repeated_column_labels(self, datapath): - # GH 13923, 25772 - msg = """ -Value labels for column ethnicsn are not unique. These cannot be converted to -pandas categoricals. - -Either read the file with `convert_categoricals` set to False or use the -low level interface in `StataReader` to separately read the values and the -value_labels. - -The repeated labels are:\n-+\nwolof -""" - with pytest.raises(ValueError, match=msg): - read_stata( - datapath("io", "data", "stata", "stata15.dta"), - convert_categoricals=True, - ) - - def test_stata_111(self, datapath): - # 111 is an old version but still used by current versions of - # SAS when exporting to Stata format. We do not know of any - # on-line documentation for this version. - df = read_stata(datapath("io", "data", "stata", "stata7_111.dta")) - original = DataFrame( - { - "y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0], - "x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6], - "w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3], - "z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"], - } - ) - original = original[["y", "x", "w", "z"]] - tm.assert_frame_equal(original, df) - - def test_out_of_range_double(self): - # GH 14618 - df = DataFrame( - { - "ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307], - "ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max], - } - ) - msg = ( - r"Column ColumnTooBig has a maximum value \(.+\) outside the range " - r"supported by Stata \(.+\)" - ) - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean() as path: - df.to_stata(path) - - def test_out_of_range_float(self): - original = DataFrame( - { - "ColumnOk": [ - 0.0, - np.finfo(np.float32).eps, - np.finfo(np.float32).max / 10.0, - ], - "ColumnTooBig": [ - 0.0, - np.finfo(np.float32).eps, - np.finfo(np.float32).max, - ], - } - ) - original.index.name = "index" - for col in original: - original[col] = original[col].astype(np.float32) - - with tm.ensure_clean() as path: - original.to_stata(path) - reread = read_stata(path) - - original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64) - expected = original.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(reread.set_index("index"), expected) - - @pytest.mark.parametrize("infval", [np.inf, -np.inf]) - def test_inf(self, infval): - # GH 45350 - df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]}) - msg = ( - "Column WithInf contains infinity or -infinity" - "which is outside the range supported by Stata." 
- ) - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean() as path: - df.to_stata(path) - - def test_path_pathlib(self): - df = tm.makeDataFrame() - df.index.name = "index" - reader = lambda x: read_stata(x).set_index("index") - result = tm.round_trip_pathlib(df.to_stata, reader) - tm.assert_frame_equal(df, result) - - def test_pickle_path_localpath(self): - df = tm.makeDataFrame() - df.index.name = "index" - reader = lambda x: read_stata(x).set_index("index") - result = tm.round_trip_localpath(df.to_stata, reader) - tm.assert_frame_equal(df, result) - - @pytest.mark.parametrize("write_index", [True, False]) - def test_value_labels_iterator(self, write_index): - # GH 16923 - d = {"A": ["B", "E", "C", "A", "E"]} - df = DataFrame(data=d) - df["A"] = df["A"].astype("category") - with tm.ensure_clean() as path: - df.to_stata(path, write_index=write_index) - - with read_stata(path, iterator=True) as dta_iter: - value_labels = dta_iter.value_labels() - assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}} - - def test_set_index(self): - # GH 17328 - df = tm.makeDataFrame() - df.index.name = "index" - with tm.ensure_clean() as path: - df.to_stata(path) - reread = read_stata(path, index_col="index") - tm.assert_frame_equal(df, reread) - - @pytest.mark.parametrize( - "column", ["ms", "day", "week", "month", "qtr", "half", "yr"] - ) - def test_date_parsing_ignores_format_details(self, column, datapath): - # GH 17797 - # - # Test that display formats are ignored when determining if a numeric - # column is a date value. - # - # All date types are stored as numbers and format associated with the - # column denotes both the type of the date and the display format. - # - # STATA supports 9 date types which each have distinct units. We test 7 - # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that - # accounts for leap seconds and %tb relies on STATAs business calendar. 
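# --- editorial aside: a standalone illustration (hypothetical data, not from
# the test below) of the %t* units just described. The convert_dates format
# picks the storage unit, and coarser units truncate the timestamp on the
# round trip. ---
import datetime as dt
import io

import pandas as pd

frame = pd.DataFrame({"when": [dt.datetime(2006, 11, 20, 23, 13, 20)]})
buf = io.BytesIO()
# "tc" keeps milliseconds; "td" stores days; "tm" months; "ty" years.
frame.to_stata(buf, write_index=False, convert_dates={"when": "td"})
buf.seek(0)
print(pd.read_stata(buf).loc[0, "when"])  # 2006-11-20 00:00:00
# --- end aside ---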
- df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta")) - unformatted = df.loc[0, column] - formatted = df.loc[0, column + "_fmt"] - assert unformatted == formatted - - def test_writer_117(self): - original = DataFrame( - data=[ - [ - "string", - "object", - 1, - 1, - 1, - 1.1, - 1.1, - np.datetime64("2003-12-25"), - "a", - "a" * 2045, - "a" * 5000, - "a", - ], - [ - "string-1", - "object-1", - 1, - 1, - 1, - 1.1, - 1.1, - np.datetime64("2003-12-26"), - "b", - "b" * 2045, - "", - "", - ], - ], - columns=[ - "string", - "object", - "int8", - "int16", - "int32", - "float32", - "float64", - "datetime", - "s1", - "s2045", - "srtl", - "forced_strl", - ], - ) - original["object"] = Series(original["object"], dtype=object) - original["int8"] = Series(original["int8"], dtype=np.int8) - original["int16"] = Series(original["int16"], dtype=np.int16) - original["int32"] = original["int32"].astype(np.int32) - original["float32"] = Series(original["float32"], dtype=np.float32) - original.index.name = "index" - original.index = original.index.astype(np.int32) - copy = original.copy() - with tm.ensure_clean() as path: - original.to_stata( - path, - convert_dates={"datetime": "tc"}, - convert_strl=["forced_strl"], - version=117, - ) - written_and_read_again = self.read_dta(path) - # original.index is np.int32, read index is np.int64 - tm.assert_frame_equal( - written_and_read_again.set_index("index"), - original, - check_index_type=False, - ) - tm.assert_frame_equal(original, copy) - - def test_convert_strl_name_swap(self): - original = DataFrame( - [["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]], - columns=["long1" * 10, "long", 1], - ) - original.index.name = "index" - - with tm.assert_produces_warning(InvalidColumnName): - with tm.ensure_clean() as path: - original.to_stata(path, convert_strl=["long", 1], version=117) - reread = self.read_dta(path) - reread = reread.set_index("index") - reread.columns = original.columns - tm.assert_frame_equal(reread, original, check_index_type=False) - - def test_invalid_date_conversion(self): - # GH 12259 - dates = [ - dt.datetime(1999, 12, 31, 12, 12, 12, 12000), - dt.datetime(2012, 12, 21, 12, 21, 12, 21000), - dt.datetime(1776, 7, 4, 7, 4, 7, 4000), - ] - original = DataFrame( - { - "nums": [1.0, 2.0, 3.0], - "strs": ["apple", "banana", "cherry"], - "dates": dates, - } - ) - - with tm.ensure_clean() as path: - msg = "convert_dates key must be a column or an integer" - with pytest.raises(ValueError, match=msg): - original.to_stata(path, convert_dates={"wrong_name": "tc"}) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_nonfile_writing(self, version): - # GH 21041 - bio = io.BytesIO() - df = tm.makeDataFrame() - df.index.name = "index" - with tm.ensure_clean() as path: - df.to_stata(bio, version=version) - bio.seek(0) - with open(path, "wb") as dta: - dta.write(bio.read()) - reread = read_stata(path, index_col="index") - tm.assert_frame_equal(df, reread) - - def test_gzip_writing(self): - # writing version 117 requires seek and cannot be used with gzip - df = tm.makeDataFrame() - df.index.name = "index" - with tm.ensure_clean() as path: - with gzip.GzipFile(path, "wb") as gz: - df.to_stata(gz, version=114) - with gzip.GzipFile(path, "rb") as gz: - reread = read_stata(gz, index_col="index") - tm.assert_frame_equal(df, reread) - - def test_unicode_dta_118(self, datapath): - unicode_df = self.read_dta(datapath("io", "data", "stata", "stata16_118.dta")) - - columns = ["utf8", "latin1", "ascii", "utf8_strl", 
"ascii_strl"] - values = [ - ["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"], - ["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"], - ["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"], - [" ", " ", "d", " ", "d"], - [" ", "", "a", " ", "a"], - ["", "", "s", "", "s"], - ["", "", " ", "", " "], - ] - expected = DataFrame(values, columns=columns) - - tm.assert_frame_equal(unicode_df, expected) - - def test_mixed_string_strl(self): - # GH 23633 - output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}] - output = DataFrame(output) - output.number = output.number.astype("int32") - - with tm.ensure_clean() as path: - output.to_stata(path, write_index=False, version=117) - reread = read_stata(path) - expected = output.fillna("") - tm.assert_frame_equal(reread, expected) - - # Check strl supports all None (null) - output["mixed"] = None - output.to_stata( - path, write_index=False, convert_strl=["mixed"], version=117 - ) - reread = read_stata(path) - expected = output.fillna("") - tm.assert_frame_equal(reread, expected) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_all_none_exception(self, version): - output = [{"none": "none", "number": 0}, {"none": None, "number": 1}] - output = DataFrame(output) - output["none"] = None - with tm.ensure_clean() as path: - with pytest.raises(ValueError, match="Column `none` cannot be exported"): - output.to_stata(path, version=version) - - @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) - def test_invalid_file_not_written(self, version): - content = "Here is one __�__ Another one __·__ Another one __½__" - df = DataFrame([content], columns=["invalid"]) - with tm.ensure_clean() as path: - msg1 = ( - r"'latin-1' codec can't encode character '\\ufffd' " - r"in position 14: ordinal not in range\(256\)" - ) - msg2 = ( - "'ascii' codec can't decode byte 0xef in position 14: " - r"ordinal not in range\(128\)" - ) - with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"): - df.to_stata(path) - - def test_strl_latin1(self): - # GH 23573, correct GSO data to reflect correct size - output = DataFrame( - [["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"] - ) - - with tm.ensure_clean() as path: - output.to_stata(path, version=117, convert_strl=["var_strl"]) - with open(path, "rb") as reread: - content = reread.read() - expected = "þâÑÐŧ" - assert expected.encode("latin-1") in content - assert expected.encode("utf-8") in content - gsos = content.split(b"strls")[1][1:-2] - for gso in gsos.split(b"GSO")[1:]: - val = gso.split(b"\x00")[-2] - size = gso[gso.find(b"\x82") + 1] - assert len(val) == size - 1 - - def test_encoding_latin1_118(self, datapath): - # GH 25960 - msg = """ -One or more strings in the dta file could not be decoded using utf-8, and -so the fallback encoding of latin-1 is being used. This can happen when a file -has been incorrectly encoded by Stata or some other software. 
You should verify -the string values returned are correct.""" - # Move path outside of read_stata, or else assert_produces_warning - # will block pytests skip mechanism from triggering (failing the test) - # if the path is not present - path = datapath("io", "data", "stata", "stata1_encoding_118.dta") - with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w: - encoded = read_stata(path) - # with filter_level="always", produces 151 warnings which can be slow - assert len(w) == 1 - assert w[0].message.args[0] == msg - - expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"]) - tm.assert_frame_equal(encoded, expected) - - @pytest.mark.slow - def test_stata_119(self, datapath): - # Gzipped since contains 32,999 variables and uncompressed is 20MiB - with gzip.open( - datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb" - ) as gz: - df = read_stata(gz) - assert df.shape == (1, 32999) - assert df.iloc[0, 6] == "A" * 3000 - assert df.iloc[0, 7] == 3.14 - assert df.iloc[0, -1] == 1 - assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21)) - - @pytest.mark.parametrize("version", [118, 119, None]) - def test_utf8_writer(self, version): - cat = pd.Categorical(["a", "β", "ĉ"], ordered=True) - data = DataFrame( - [ - [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"], - [2.0, 2, "ᴮ", ""], - [3.0, 3, "ᴰ", None], - ], - columns=["Å", "β", "ĉ", "strls"], - ) - data["ᴐᴬᵀ"] = cat - variable_labels = { - "Å": "apple", - "β": "ᵈᵉᵊ", - "ĉ": "ᴎტჄႲႳႴႶႺ", - "strls": "Long Strings", - "ᴐᴬᵀ": "", - } - data_label = "ᴅaᵀa-label" - value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}} - data["β"] = data["β"].astype(np.int32) - with tm.ensure_clean() as path: - writer = StataWriterUTF8( - path, - data, - data_label=data_label, - convert_strl=["strls"], - variable_labels=variable_labels, - write_index=False, - version=version, - value_labels=value_labels, - ) - writer.write_file() - reread_encoded = read_stata(path) - # Missing is intentionally converted to empty strl - data["strls"] = data["strls"].fillna("") - # Variable with value labels is reread as categorical - data["β"] = ( - data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered() - ) - tm.assert_frame_equal(data, reread_encoded) - with StataReader(path) as reader: - assert reader.data_label == data_label - assert reader.variable_labels() == variable_labels - - data.to_stata(path, version=version, write_index=False) - reread_to_stata = read_stata(path) - tm.assert_frame_equal(data, reread_to_stata) - - def test_writer_118_exceptions(self): - df = DataFrame(np.zeros((1, 33000), dtype=np.int8)) - with tm.ensure_clean() as path: - with pytest.raises(ValueError, match="version must be either 118 or 119."): - StataWriterUTF8(path, df, version=117) - with tm.ensure_clean() as path: - with pytest.raises(ValueError, match="You must use version 119"): - StataWriterUTF8(path, df, version=118) - - -@pytest.mark.parametrize("version", [105, 108, 111, 113, 114]) -def test_backward_compat(version, datapath): - data_base = datapath("io", "data", "stata") - ref = os.path.join(data_base, "stata-compat-118.dta") - old = os.path.join(data_base, f"stata-compat-{version}.dta") - expected = read_stata(ref) - old_dta = read_stata(old) - tm.assert_frame_equal(old_dta, expected, check_dtype=False) - - -def test_direct_read(datapath, monkeypatch): - file_path = datapath("io", "data", "stata", "stata-compat-118.dta") - - # Test that opening a file path doesn't buffer the file. 
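# --- editorial aside: for orientation, the reader exercised below is a
# context manager; a minimal usage sketch with a hypothetical file name. ---
from pandas.io.stata import StataReader

with StataReader("example.dta") as reader:  # hypothetical path
    frame = reader.read()            # whole dataset as a DataFrame
    labels = reader.value_labels()   # mapping of value labels, if any
# --- end aside ---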
- with StataReader(file_path) as reader: - # Must not have been buffered to memory - assert not reader.read().empty - assert not isinstance(reader._path_or_buf, io.BytesIO) - - # Test that we use a given fp exactly, if possible. - with open(file_path, "rb") as fp: - with StataReader(fp) as reader: - assert not reader.read().empty - assert reader._path_or_buf is fp - - # Test that we use a given BytesIO exactly, if possible. - with open(file_path, "rb") as fp: - with io.BytesIO(fp.read()) as bio: - with StataReader(bio) as reader: - assert not reader.read().empty - assert reader._path_or_buf is bio - - -def test_statareader_warns_when_used_without_context(datapath): - file_path = datapath("io", "data", "stata", "stata-compat-118.dta") - with tm.assert_produces_warning( - ResourceWarning, - match="without using a context manager", - ): - sr = StataReader(file_path) - sr.read() - with tm.assert_produces_warning( - FutureWarning, - match="is not part of the public API", - ): - sr.close() - - -@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) -@pytest.mark.parametrize("use_dict", [True, False]) -@pytest.mark.parametrize("infer", [True, False]) -def test_compression(compression, version, use_dict, infer, compression_to_extension): - file_name = "dta_inferred_compression.dta" - if compression: - if use_dict: - file_ext = compression - else: - file_ext = compression_to_extension[compression] - file_name += f".{file_ext}" - compression_arg = compression - if infer: - compression_arg = "infer" - if use_dict: - compression_arg = {"method": compression} - - df = DataFrame( - np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") - ) - df.index.name = "index" - with tm.ensure_clean(file_name) as path: - df.to_stata(path, version=version, compression=compression_arg) - if compression == "gzip": - with gzip.open(path, "rb") as comp: - fp = io.BytesIO(comp.read()) - elif compression == "zip": - with zipfile.ZipFile(path, "r") as comp: - fp = io.BytesIO(comp.read(comp.filelist[0])) - elif compression == "tar": - with tarfile.open(path) as tar: - fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read()) - elif compression == "bz2": - with bz2.open(path, "rb") as comp: - fp = io.BytesIO(comp.read()) - elif compression == "zstd": - zstd = pytest.importorskip("zstandard") - with zstd.open(path, "rb") as comp: - fp = io.BytesIO(comp.read()) - elif compression == "xz": - lzma = pytest.importorskip("lzma") - with lzma.open(path, "rb") as comp: - fp = io.BytesIO(comp.read()) - elif compression is None: - fp = path - reread = read_stata(fp, index_col="index") - - expected = df.copy() - expected.index = expected.index.astype(np.int32) - tm.assert_frame_equal(reread, expected) - - -@pytest.mark.parametrize("method", ["zip", "infer"]) -@pytest.mark.parametrize("file_ext", [None, "dta", "zip"]) -def test_compression_dict(method, file_ext): - file_name = f"test.{file_ext}" - archive_name = "test.dta" - df = DataFrame( - np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") - ) - df.index.name = "index" - with tm.ensure_clean(file_name) as path: - compression = {"method": method, "archive_name": archive_name} - df.to_stata(path, compression=compression) - if method == "zip" or file_ext == "zip": - with zipfile.ZipFile(path, "r") as zp: - assert len(zp.filelist) == 1 - assert zp.filelist[0].filename == archive_name - fp = io.BytesIO(zp.read(zp.filelist[0])) - else: - fp = path - reread = read_stata(fp, index_col="index") - - expected = df.copy() - expected.index = 
expected.index.astype(np.int32) - tm.assert_frame_equal(reread, expected) - - -@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) -def test_chunked_categorical(version): - df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")}) - df.index.name = "index" - - expected = df.copy() - expected.index = expected.index.astype(np.int32) - - with tm.ensure_clean() as path: - df.to_stata(path, version=version) - with StataReader(path, chunksize=2, order_categoricals=False) as reader: - for i, block in enumerate(reader): - block = block.set_index("index") - assert "cats" in block - tm.assert_series_equal( - block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)] - ) - - -def test_chunked_categorical_partial(datapath): - dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") - values = ["a", "b", "a", "b", 3.0] - with StataReader(dta_file, chunksize=2) as reader: - with tm.assert_produces_warning(CategoricalConversionWarning): - for i, block in enumerate(reader): - assert list(block.cats) == values[2 * i : 2 * (i + 1)] - if i < 2: - idx = pd.Index(["a", "b"]) - else: - idx = pd.Index([3.0], dtype="float64") - tm.assert_index_equal(block.cats.cat.categories, idx) - with tm.assert_produces_warning(CategoricalConversionWarning): - with StataReader(dta_file, chunksize=5) as reader: - large_chunk = reader.__next__() - direct = read_stata(dta_file) - tm.assert_frame_equal(direct, large_chunk) - - -@pytest.mark.parametrize("chunksize", (-1, 0, "apple")) -def test_iterator_errors(datapath, chunksize): - dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") - with pytest.raises(ValueError, match="chunksize must be a positive"): - with StataReader(dta_file, chunksize=chunksize): - pass - - -def test_iterator_value_labels(): - # GH 31544 - values = ["c_label", "b_label"] + ["a_label"] * 500 - df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)}) - with tm.ensure_clean() as path: - df.to_stata(path, write_index=False) - expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object") - with read_stata(path, chunksize=100) as reader: - for j, chunk in enumerate(reader): - for i in range(2): - tm.assert_index_equal(chunk.dtypes.iloc[i].categories, expected) - tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100]) - - -def test_precision_loss(): - df = DataFrame( - [[sum(2**i for i in range(60)), sum(2**i for i in range(52))]], - columns=["big", "little"], - ) - with tm.ensure_clean() as path: - with tm.assert_produces_warning( - PossiblePrecisionLoss, match="Column converted from int64 to float64" - ): - df.to_stata(path, write_index=False) - reread = read_stata(path) - expected_dt = Series([np.float64, np.float64], index=["big", "little"]) - tm.assert_series_equal(reread.dtypes, expected_dt) - assert reread.loc[0, "little"] == df.loc[0, "little"] - assert reread.loc[0, "big"] == float(df.loc[0, "big"]) - - -def test_compression_roundtrip(compression): - df = DataFrame( - [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], - index=["A", "B"], - columns=["X", "Y", "Z"], - ) - df.index.name = "index" - - with tm.ensure_clean() as path: - df.to_stata(path, compression=compression) - reread = read_stata(path, compression=compression, index_col="index") - tm.assert_frame_equal(df, reread) - - # explicitly ensure file was compressed. 
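# --- editorial aside: a standalone sketch (hypothetical file name) of the
# extension-based compression inference the surrounding tests rely on; both
# the writer and the reader default to compression="infer". ---
import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0]})
df.to_stata("data.dta.gz", write_index=False)  # gzip inferred from ".gz"
back = pd.read_stata("data.dta.gz")            # decompressed transparently
# --- end aside ---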
- with tm.decompress_file(path, compression) as fh: - contents = io.BytesIO(fh.read()) - reread = read_stata(contents, index_col="index") - tm.assert_frame_equal(df, reread) - - -@pytest.mark.parametrize("to_infer", [True, False]) -@pytest.mark.parametrize("read_infer", [True, False]) -def test_stata_compression( - compression_only, read_infer, to_infer, compression_to_extension -): - compression = compression_only - - ext = compression_to_extension[compression] - filename = f"test.{ext}" - - df = DataFrame( - [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], - index=["A", "B"], - columns=["X", "Y", "Z"], - ) - df.index.name = "index" - - to_compression = "infer" if to_infer else compression - read_compression = "infer" if read_infer else compression - - with tm.ensure_clean(filename) as path: - df.to_stata(path, compression=to_compression) - result = read_stata(path, compression=read_compression, index_col="index") - tm.assert_frame_equal(result, df) - - -def test_non_categorical_value_labels(): - data = DataFrame( - { - "fully_labelled": [1, 2, 3, 3, 1], - "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan], - "Y": [7, 7, 9, 8, 10], - "Z": pd.Categorical(["j", "k", "l", "k", "j"]), - } - ) - - with tm.ensure_clean() as path: - value_labels = { - "fully_labelled": {1: "one", 2: "two", 3: "three"}, - "partially_labelled": {1.0: "one", 2.0: "two"}, - } - expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}} - - writer = StataWriter(path, data, value_labels=value_labels) - writer.write_file() - - with StataReader(path) as reader: - reader_value_labels = reader.value_labels() - assert reader_value_labels == expected - - msg = "Can't create value labels for notY, it wasn't found in the dataset." - with pytest.raises(KeyError, match=msg): - value_labels = {"notY": {7: "label1", 8: "label2"}} - StataWriter(path, data, value_labels=value_labels) - - msg = ( - "Can't create value labels for Z, value labels " - "can only be applied to numeric columns." 
- ) - with pytest.raises(ValueError, match=msg): - value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}} - StataWriter(path, data, value_labels=value_labels) - - -def test_non_categorical_value_label_name_conversion(): - # Check conversion of invalid variable names - data = DataFrame( - { - "invalid~!": [1, 1, 2, 3, 5, 8], # Only alphanumeric and _ - "6_invalid": [1, 1, 2, 3, 5, 8], # Must start with letter or _ - "invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8], # Too long - "aggregate": [2, 5, 5, 6, 6, 9], # Reserved words - (1, 2): [1, 2, 3, 4, 5, 6], # Hashable non-string - } - ) - - value_labels = { - "invalid~!": {1: "label1", 2: "label2"}, - "6_invalid": {1: "label1", 2: "label2"}, - "invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"}, - "aggregate": {5: "five"}, - (1, 2): {3: "three"}, - } - - expected = { - "invalid__": {1: "label1", 2: "label2"}, - "_6_invalid": {1: "label1", 2: "label2"}, - "invalid_name_longer_than_32_char": {8: "eight", 9: "nine"}, - "_aggregate": {5: "five"}, - "_1__2_": {3: "three"}, - } - - with tm.ensure_clean() as path: - with tm.assert_produces_warning(InvalidColumnName): - data.to_stata(path, value_labels=value_labels) - - with StataReader(path) as reader: - reader_value_labels = reader.value_labels() - assert reader_value_labels == expected - - -def test_non_categorical_value_label_convert_categoricals_error(): - # Mapping more than one value to the same label is valid for Stata - # labels, but can't be read with convert_categoricals=True - value_labels = { - "repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"} - } - - data = DataFrame( - { - "repeated_labels": [10, 10, 20, 20, 40, 40], - } - ) - - with tm.ensure_clean() as path: - data.to_stata(path, value_labels=value_labels) - - with StataReader(path, convert_categoricals=False) as reader: - reader_value_labels = reader.value_labels() - assert reader_value_labels == value_labels - - col = "repeated_labels" - repeats = "-" * 80 + "\n" + "\n".join(["More than ten"]) - - msg = f""" -Value labels for column {col} are not unique. These cannot be converted to -pandas categoricals. - -Either read the file with `convert_categoricals` set to False or use the -low level interface in `StataReader` to separately read the values and the -value_labels. 
- -The repeated labels are: -{repeats} -""" - with pytest.raises(ValueError, match=msg): - read_stata(path, convert_categoricals=True) - - -@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) -@pytest.mark.parametrize( - "dtype", - [ - pd.BooleanDtype, - pd.Int8Dtype, - pd.Int16Dtype, - pd.Int32Dtype, - pd.Int64Dtype, - pd.UInt8Dtype, - pd.UInt16Dtype, - pd.UInt32Dtype, - pd.UInt64Dtype, - ], -) -def test_nullable_support(dtype, version): - df = DataFrame( - { - "a": Series([1.0, 2.0, 3.0]), - "b": Series([1, pd.NA, pd.NA], dtype=dtype.name), - "c": Series(["a", "b", None]), - } - ) - dtype_name = df.b.dtype.numpy_dtype.name - # Only use supported names: no uint, bool or int64 - dtype_name = dtype_name.replace("u", "") - if dtype_name == "int64": - dtype_name = "int32" - elif dtype_name == "bool": - dtype_name = "int8" - value = StataMissingValue.BASE_MISSING_VALUES[dtype_name] - smv = StataMissingValue(value) - expected_b = Series([1, smv, smv], dtype=object, name="b") - expected_c = Series(["a", "b", ""], name="c") - with tm.ensure_clean() as path: - df.to_stata(path, write_index=False, version=version) - reread = read_stata(path, convert_missing=True) - tm.assert_series_equal(df.a, reread.a) - tm.assert_series_equal(reread.b, expected_b) - tm.assert_series_equal(reread.c, expected_c) - - -def test_empty_frame(): - # GH 46240 - # create an empty DataFrame with int64 and float64 dtypes - df = DataFrame(data={"a": range(3), "b": [1.0, 2.0, 3.0]}).head(0) - with tm.ensure_clean() as path: - df.to_stata(path, write_index=False, version=117) - # Read entire dataframe - df2 = read_stata(path) - assert "b" in df2 - # Dtypes don't match since no support for int32 - dtypes = Series({"a": np.dtype("int32"), "b": np.dtype("float64")}) - tm.assert_series_equal(df2.dtypes, dtypes) - # read one column of empty .dta file - df3 = read_stata(path, columns=["a"]) - assert "b" not in df3 - tm.assert_series_equal(df3.dtypes, dtypes.loc[["a"]]) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/certifi/core.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/certifi/core.py deleted file mode 100644 index b8140cf1ae7cd6d84a484668608ec6226db20e37..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/certifi/core.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem or its contents. -""" -import os - - -class _PipPatchedCertificate(Exception): - pass - - -try: - # Return a certificate file on disk for a standalone pip zipapp running in - # an isolated build environment to use. Passing --cert to the standalone - # pip does not work since requests calls where() unconditionally on import. - _PIP_STANDALONE_CERT = os.environ.get("_PIP_STANDALONE_CERT") - if _PIP_STANDALONE_CERT: - def where(): - return _PIP_STANDALONE_CERT - raise _PipPatchedCertificate() - - from importlib.resources import path as get_path, read_text - - _CACERT_CTX = None - _CACERT_PATH = None - - def where(): - # This is slightly terrible, but we want to delay extracting the file - # in cases where we're inside of a zipimport situation until someone - # actually calls where(), but we don't want to re-extract the file - # on every call of where(), so we'll do it once then store it in a - # global variable. 
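# --- editorial aside: seen from the caller, the caching below just yields a
# stable filesystem path; e.g. (vendored import path as used by pip):
#
#     from pip._vendor import certifi
#     certifi.where()      # ".../certifi/cacert.pem", extracted at most once
#     certifi.contents()   # the PEM text itself
#
# The context manager returned by importlib.resources.path is deliberately
# kept alive in a global so that, when the package is inside a zip, the
# extracted temporary file is not cleaned up while its path is still in use.
# --- end aside ---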
- global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you to - # manage the cleanup of this file, so it doesn't actually return a - # path, it returns a context manager that will give you the path - # when you enter it and will do any cleanup when you leave it. In - # the common case of not needing a temporary file, it will just - # return the file system location and the __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem") - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - -except _PipPatchedCertificate: - pass - -except ImportError: - # This fallback will work for Python versions prior to 3.7 that lack the - # importlib.resources module but relies on the existing `where` function - # so won't address issues with environments like PyOxidizer that don't set - # __file__ on modules. - def read_text(_module, _path, encoding="ascii"): - with open(where(), "r", encoding=encoding) as data: - return data.read() - - # If we don't have importlib.resources, then we will just do the old logic - # of assuming we're on the filesystem and munge the path directly. - def where(): - f = os.path.dirname(__file__) - - return os.path.join(f, "cacert.pem") - - -def contents(): - return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/formatters/svg.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/formatters/svg.py deleted file mode 100644 index d4de51f0e666734b63b5ebd46fc415fc191d5835..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/pygments/formatters/svg.py +++ /dev/null @@ -1,188 +0,0 @@ -""" - pygments.formatters.svg - ~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for SVG output. - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.token import Comment -from pip._vendor.pygments.util import get_bool_opt, get_int_opt - -__all__ = ['SvgFormatter'] - - -def escape_html(text): - """Escape &, <, > as well as single and double quotes for HTML.""" - return text.replace('&', '&'). \ - replace('<', '<'). \ - replace('>', '>'). \ - replace('"', '"'). \ - replace("'", ''') - - -class2style = {} - -class SvgFormatter(Formatter): - """ - Format tokens as an SVG graphics file. This formatter is still experimental. - Each line of code is a ```` element with explicit ``x`` and ``y`` - coordinates containing ```` elements with the individual token styles. - - By default, this formatter outputs a full SVG document including doctype - declaration and the ```` root element. - - .. versionadded:: 0.9 - - Additional options accepted: - - `nowrap` - Don't wrap the SVG ```` elements in ```` elements and - don't add a XML declaration and a doctype. If true, the `fontfamily` - and `fontsize` options are ignored. Defaults to ``False``. - - `fontfamily` - The value to give the wrapping ```` element's ``font-family`` - attribute, defaults to ``"monospace"``. 
- - `fontsize` - The value to give the wrapping ```` element's ``font-size`` - attribute, defaults to ``"14px"``. - - `linenos` - If ``True``, add line numbers (default: ``False``). - - `linenostart` - The line number for the first line (default: ``1``). - - `linenostep` - If set to a number n > 1, only every nth line number is printed. - - `linenowidth` - Maximum width devoted to line numbers (default: ``3*ystep``, sufficient - for up to 4-digit line numbers. Increase width for longer code blocks). - - `xoffset` - Starting offset in X direction, defaults to ``0``. - - `yoffset` - Starting offset in Y direction, defaults to the font size if it is given - in pixels, or ``20`` else. (This is necessary since text coordinates - refer to the text baseline, not the top edge.) - - `ystep` - Offset to add to the Y coordinate for each subsequent line. This should - roughly be the text size plus 5. It defaults to that value if the text - size is given in pixels, or ``25`` else. - - `spacehack` - Convert spaces in the source to `` ``, which are non-breaking - spaces. SVG provides the ``xml:space`` attribute to control how - whitespace inside tags is handled, in theory, the ``preserve`` value - could be used to keep all whitespace as-is. However, many current SVG - viewers don't obey that rule, so this option is provided as a workaround - and defaults to ``True``. - """ - name = 'SVG' - aliases = ['svg'] - filenames = ['*.svg'] - - def __init__(self, **options): - Formatter.__init__(self, **options) - self.nowrap = get_bool_opt(options, 'nowrap', False) - self.fontfamily = options.get('fontfamily', 'monospace') - self.fontsize = options.get('fontsize', '14px') - self.xoffset = get_int_opt(options, 'xoffset', 0) - fs = self.fontsize.strip() - if fs.endswith('px'): fs = fs[:-2].strip() - try: - int_fs = int(fs) - except: - int_fs = 20 - self.yoffset = get_int_opt(options, 'yoffset', int_fs) - self.ystep = get_int_opt(options, 'ystep', int_fs + 5) - self.spacehack = get_bool_opt(options, 'spacehack', True) - self.linenos = get_bool_opt(options,'linenos',False) - self.linenostart = get_int_opt(options,'linenostart',1) - self.linenostep = get_int_opt(options,'linenostep',1) - self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep) - self._stylecache = {} - - def format_unencoded(self, tokensource, outfile): - """ - Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` - tuples and write it into ``outfile``. - - For our implementation we put all lines in their own 'line group'. 
- """ - x = self.xoffset - y = self.yoffset - if not self.nowrap: - if self.encoding: - outfile.write('\n' % - self.encoding) - else: - outfile.write('\n') - outfile.write('\n') - outfile.write('\n') - outfile.write('\n' % - (self.fontfamily, self.fontsize)) - - counter = self.linenostart - counter_step = self.linenostep - counter_style = self._get_style(Comment) - line_x = x - - if self.linenos: - if counter % counter_step == 0: - outfile.write('%s' % - (x+self.linenowidth,y,counter_style,counter)) - line_x += self.linenowidth + self.ystep - counter += 1 - - outfile.write('' % (line_x, y)) - for ttype, value in tokensource: - style = self._get_style(ttype) - tspan = style and '' or '' - tspanend = tspan and '' or '' - value = escape_html(value) - if self.spacehack: - value = value.expandtabs().replace(' ', ' ') - parts = value.split('\n') - for part in parts[:-1]: - outfile.write(tspan + part + tspanend) - y += self.ystep - outfile.write('\n') - if self.linenos and counter % counter_step == 0: - outfile.write('%s' % - (x+self.linenowidth,y,counter_style,counter)) - - counter += 1 - outfile.write('' % (line_x,y)) - outfile.write(tspan + parts[-1] + tspanend) - outfile.write('') - - if not self.nowrap: - outfile.write('\n') - - def _get_style(self, tokentype): - if tokentype in self._stylecache: - return self._stylecache[tokentype] - otokentype = tokentype - while not self.style.styles_token(tokentype): - tokentype = tokentype.parent - value = self.style.style_for_token(tokentype) - result = '' - if value['color']: - result = ' fill="#' + value['color'] + '"' - if value['bold']: - result += ' font-weight="bold"' - if value['italic']: - result += ' font-style="italic"' - self._stylecache[otokentype] = result - return result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/dotnet.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/dotnet.py deleted file mode 100644 index 5c488dd9743d9676c0079196c3af85ecac832739..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/dotnet.py +++ /dev/null @@ -1,841 +0,0 @@ -""" - pygments.lexers.dotnet - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for .net languages. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" -import re - -from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \ - using, this, default, words -from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ - Name, String, Number, Literal, Other, Whitespace -from pygments.util import get_choice_opt -from pygments import unistring as uni - -from pygments.lexers.html import XmlLexer - -__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer', - 'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer', 'XppLexer'] - - -class CSharpLexer(RegexLexer): - """ - For C# source code. - - Additional options accepted: - - `unicodelevel` - Determines which Unicode characters this lexer allows for identifiers. - The possible values are: - - * ``none`` -- only the ASCII letters and numbers are allowed. This - is the fastest selection. - * ``basic`` -- all Unicode characters from the specification except - category ``Lo`` are allowed. - * ``full`` -- all Unicode characters as specified in the C# specs - are allowed. Note that this means a considerable slowdown since the - ``Lo`` category has more than 40,000 characters in it! - - The default value is ``basic``. 
- - .. versionadded:: 0.8 - """ - - name = 'C#' - url = 'https://docs.microsoft.com/en-us/dotnet/csharp/' - aliases = ['csharp', 'c#', 'cs'] - filenames = ['*.cs'] - mimetypes = ['text/x-csharp'] # inferred - - flags = re.MULTILINE | re.DOTALL - - # for the range of allowed unicode characters in identifiers, see - # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf - - levels = { - 'none': r'@?[_a-zA-Z]\w*', - 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + - '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', - 'Cf', 'Mn', 'Mc') + ']*'), - 'full': ('@?(?:_|[^' + - uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + - '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), - } - - tokens = {} - token_variants = True - - for levelname, cs_ident in levels.items(): - tokens[levelname] = { - 'root': [ - # method names - (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name - r'(\s*)(\()', # signature start - bygroups(Whitespace, using(this), Name.Function, Whitespace, - Punctuation)), - (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)), - (r'[^\S\n]+', Whitespace), - (r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation - (r'//.*?\n', Comment.Single), - (r'/[*].*?[*]/', Comment.Multiline), - (r'\n', Whitespace), - (words(( - '>>>=', '>>=', '<<=', '<=', '>=', '+=', '-=', '*=', '/=', - '%=', '&=', '|=', '^=', '??=', '=>', '??', '?.', '!=', '==', - '&&', '||', '>>>', '>>', '<<', '++', '--', '+', '-', '*', - '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=', - )), Operator), - (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator), - (r'[()\[\];:,.]', Punctuation), - (r'[{}]', Punctuation), - (r'@"(""|[^"])*"', String), - (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String), - (r"'\\.'|'[^\\]'", String.Char), - (r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?" 
- r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number), - (r'(#)([ \t]*)(if|endif|else|elif|define|undef|' - r'line|error|warning|region|endregion|pragma)\b(.*?)(\n)', - bygroups(Comment.Preproc, Whitespace, Comment.Preproc, - Comment.Preproc, Whitespace)), - (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, - Keyword)), - (r'(abstract|as|async|await|base|break|by|case|catch|' - r'checked|const|continue|default|delegate|' - r'do|else|enum|event|explicit|extern|false|finally|' - r'fixed|for|foreach|goto|if|implicit|in|interface|' - r'internal|is|let|lock|new|null|on|operator|' - r'out|override|params|private|protected|public|readonly|' - r'ref|return|sealed|sizeof|stackalloc|static|' - r'switch|this|throw|true|try|typeof|' - r'unchecked|unsafe|virtual|void|while|' - r'get|set|new|partial|yield|add|remove|value|alias|ascending|' - r'descending|from|group|into|orderby|select|thenby|where|' - r'join|equals)\b', Keyword), - (r'(global)(::)', bygroups(Keyword, Punctuation)), - (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|' - r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type), - (r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'class'), - (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), 'namespace'), - (cs_ident, Name), - ], - 'class': [ - (cs_ident, Name.Class, '#pop'), - default('#pop'), - ], - 'namespace': [ - (r'(?=\()', Text, '#pop'), # using (resource) - ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'), - ] - } - - def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic') - if level not in self._all_tokens: - # compile the regexes now - self._tokens = self.__class__.process_tokendef(level) - else: - self._tokens = self._all_tokens[level] - - RegexLexer.__init__(self, **options) - - -class NemerleLexer(RegexLexer): - """ - For Nemerle source code. - - Additional options accepted: - - `unicodelevel` - Determines which Unicode characters this lexer allows for identifiers. - The possible values are: - - * ``none`` -- only the ASCII letters and numbers are allowed. This - is the fastest selection. - * ``basic`` -- all Unicode characters from the specification except - category ``Lo`` are allowed. - * ``full`` -- all Unicode characters as specified in the C# specs - are allowed. Note that this means a considerable slowdown since the - ``Lo`` category has more than 40,000 characters in it! - - The default value is ``basic``. - - .. 
versionadded:: 1.5 - """ - - name = 'Nemerle' - url = 'http://nemerle.org' - aliases = ['nemerle'] - filenames = ['*.n'] - mimetypes = ['text/x-nemerle'] # inferred - - flags = re.MULTILINE | re.DOTALL - - # for the range of allowed unicode characters in identifiers, see - # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf - - levels = { - 'none': r'@?[_a-zA-Z]\w*', - 'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + - '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', - 'Cf', 'Mn', 'Mc') + ']*'), - 'full': ('@?(?:_|[^' + - uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + - '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'), - } - - tokens = {} - token_variants = True - - for levelname, cs_ident in levels.items(): - tokens[levelname] = { - 'root': [ - # method names - (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type - r'(' + cs_ident + ')' # method name - r'(\s*)(\()', # signature start - bygroups(Whitespace, using(this), Name.Function, Whitespace, \ - Punctuation)), - (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)), - (r'[^\S\n]+', Whitespace), - (r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation - (r'//.*?\n', Comment.Single), - (r'/[*].*?[*]/', Comment.Multiline), - (r'\n', Whitespace), - (r'(\$)(\s*)(")', bygroups(String, Whitespace, String), - 'splice-string'), - (r'(\$)(\s*)(<#)', bygroups(String, Whitespace, String), - 'splice-string2'), - (r'<#', String, 'recursive-string'), - - (r'(<\[)(\s*)(' + cs_ident + ':)?', - bygroups(Keyword, Whitespace, Keyword)), - (r'\]\>', Keyword), - - # quasiquotation only - (r'\$' + cs_ident, Name), - (r'(\$)(\()', bygroups(Name, Punctuation), - 'splice-string-content'), - - (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation), - (r'[{}]', Punctuation), - (r'@"(""|[^"])*"', String), - (r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String), - (r"'\\.'|'[^\\]'", String.Char), - (r"0[xX][0-9a-fA-F]+[Ll]?", Number), - (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number), - (r'(#)([ \t]*)(if|endif|else|elif|define|undef|' - r'line|error|warning|region|endregion|pragma)\b', - bygroups(Comment.Preproc, Whitespace, Comment.Preproc), 'preproc'), - (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, Keyword)), - (r'(abstract|and|as|base|catch|def|delegate|' - r'enum|event|extern|false|finally|' - r'fun|implements|interface|internal|' - r'is|macro|match|matches|module|mutable|new|' - r'null|out|override|params|partial|private|' - r'protected|public|ref|sealed|static|' - r'syntax|this|throw|true|try|type|typeof|' - r'virtual|volatile|when|where|with|' - r'assert|assert2|async|break|checked|continue|do|else|' - r'ensures|for|foreach|if|late|lock|new|nolate|' - r'otherwise|regexp|repeat|requires|return|surroundwith|' - r'unchecked|unless|using|while|yield)\b', Keyword), - (r'(global)(::)', bygroups(Keyword, Punctuation)), - (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|' - r'short|string|uint|ulong|ushort|void|array|list)\b\??', - Keyword.Type), - (r'(:>?)(\s*)(' + cs_ident + r'\??)', - bygroups(Punctuation, Whitespace, Keyword.Type)), - (r'(class|struct|variant|module)(\s+)', - bygroups(Keyword, Whitespace), 'class'), - (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), - 'namespace'), - (cs_ident, Name), - ], - 'class': [ - (cs_ident, Name.Class, '#pop') - ], - 'preproc': [ - (r'\w+', Comment.Preproc), - (r'[ \t]+', Whitespace), - (r'\n', Whitespace, '#pop') - ], - 'namespace': [ - (r'(?=\()', Text, 
'#pop'), # using (resource) - ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop') - ], - 'splice-string': [ - (r'[^"$]', String), - (r'\$' + cs_ident, Name), - (r'(\$)(\()', bygroups(Name, Punctuation), - 'splice-string-content'), - (r'\\"', String), - (r'"', String, '#pop') - ], - 'splice-string2': [ - (r'[^#<>$]', String), - (r'\$' + cs_ident, Name), - (r'(\$)(\()', bygroups(Name, Punctuation), - 'splice-string-content'), - (r'<#', String, '#push'), - (r'#>', String, '#pop') - ], - 'recursive-string': [ - (r'[^#<>]', String), - (r'<#', String, '#push'), - (r'#>', String, '#pop') - ], - 'splice-string-content': [ - (r'if|match', Keyword), - (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation), - (cs_ident, Name), - (r'\d+', Number), - (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop') - ] - } - - def __init__(self, **options): - level = get_choice_opt(options, 'unicodelevel', list(self.tokens), - 'basic') - if level not in self._all_tokens: - # compile the regexes now - self._tokens = self.__class__.process_tokendef(level) - else: - self._tokens = self._all_tokens[level] - - RegexLexer.__init__(self, **options) - - def analyse_text(text): - """Nemerle is quite similar to Python, but @if is relatively uncommon - elsewhere.""" - result = 0 - - if '@if' in text: - result += 0.1 - - return result - - -class BooLexer(RegexLexer): - """ - For Boo source code. - """ - - name = 'Boo' - url = 'https://github.com/boo-lang/boo' - aliases = ['boo'] - filenames = ['*.boo'] - mimetypes = ['text/x-boo'] - - tokens = { - 'root': [ - (r'\s+', Whitespace), - (r'(#|//).*$', Comment.Single), - (r'/[*]', Comment.Multiline, 'comment'), - (r'[]{}:(),.;[]', Punctuation), - (r'(\\)(\n)', bygroups(Text, Whitespace)), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex), - (r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex), - (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator), - (r'(as|abstract|callable|constructor|destructor|do|import|' - r'enum|event|final|get|interface|internal|of|override|' - r'partial|private|protected|public|return|set|static|' - r'struct|transient|virtual|yield|super|and|break|cast|' - r'continue|elif|else|ensure|except|for|given|goto|if|in|' - r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|' - r'while|from|as)\b', Keyword), - (r'def(?=\s+\(.*?\))', Keyword), - (r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'), - (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'), - (r'(namespace)(\s+)', bygroups(Keyword, Whitespace), 'namespace'), - (r'(?', Name.Attribute), - (r'\s+', Whitespace), - (r'\n', Whitespace), - (r'(rem\b.*?)(\n)', bygroups(Comment, Whitespace)), - (r"('.*?)(\n)", bygroups(Comment, Whitespace)), - (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|' - r'#ExternalSource.*?\n|#End\s+ExternalSource|' - r'#Region.*?\n|#End\s+Region|#ExternalChecksum', - Comment.Preproc), - (r'[(){}!#,.:]', Punctuation), - (r'(Option)(\s+)(Strict|Explicit|Compare)(\s+)' - r'(On|Off|Binary|Text)', - bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, - Whitespace, Keyword.Declaration)), - (words(( - 'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case', - 'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl', - 'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng', - 'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare', - 'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else', - 'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False', - 'Finally', 'For', 'Friend', 'Get', 
'Global', 'GoSub', 'GoTo', - 'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let', - 'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase', - 'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing', - 'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator', - 'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides', - 'ParamArray', 'Partial', 'Private', 'Protected', 'Public', - 'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume', - 'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single', - 'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To', - 'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While', - 'Widening', 'With', 'WithEvents', 'WriteOnly'), - prefix=r'(?>=|<<|>>|:=|' - r'<=|>=|<>|[-&*/\\^+=<>\[\]]', - Operator), - ('"', String, 'string'), - (r'(_)(\n)', bygroups(Text, Whitespace)), # Line continuation (must be before Name) - (uni_name + '[%&@!#$]?', Name), - ('#.*?#', Literal.Date), - (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float), - (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer), - (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer), - (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer), - ], - 'string': [ - (r'""', String), - (r'"C?', String, '#pop'), - (r'[^"]+', String), - ], - 'dim': [ - (uni_name, Name.Variable, '#pop'), - default('#pop'), # any other syntax - ], - 'funcname': [ - (uni_name, Name.Function, '#pop'), - ], - 'classname': [ - (uni_name, Name.Class, '#pop'), - ], - 'namespace': [ - (uni_name, Name.Namespace), - (r'\.', Name.Namespace), - default('#pop'), - ], - 'end': [ - (r'\s+', Whitespace), - (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b', - Keyword, '#pop'), - default('#pop'), - ] - } - - def analyse_text(text): - if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE): - return 0.5 - - -class GenericAspxLexer(RegexLexer): - """ - Lexer for ASP.NET pages. - """ - - name = 'aspx-gen' - filenames = [] - mimetypes = [] - - flags = re.DOTALL - - tokens = { - 'root': [ - (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)), - (r'()(.*?)()', bygroups(using(XmlLexer), - Other, - using(XmlLexer))), - (r'(.+?)(?=<)', using(XmlLexer)), - (r'.+', using(XmlLexer)), - ], - } - - -# TODO support multiple languages within the same source file -class CSharpAspxLexer(DelegatingLexer): - """ - Lexer for highlighting C# within ASP.NET pages. - """ - - name = 'aspx-cs' - aliases = ['aspx-cs'] - filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'] - mimetypes = [] - - def __init__(self, **options): - super().__init__(CSharpLexer, GenericAspxLexer, **options) - - def analyse_text(text): - if re.search(r'Page\s*Language="C#"', text, re.I) is not None: - return 0.2 - elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None: - return 0.15 - - -class VbNetAspxLexer(DelegatingLexer): - """ - Lexer for highlighting Visual Basic.net within ASP.NET pages. - """ - - name = 'aspx-vb' - aliases = ['aspx-vb'] - filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'] - mimetypes = [] - - def __init__(self, **options): - super().__init__(VbNetLexer, GenericAspxLexer, **options) - - def analyse_text(text): - if re.search(r'Page\s*Language="Vb"', text, re.I) is not None: - return 0.2 - elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None: - return 0.15 - - -# Very close to functional.OcamlLexer -class FSharpLexer(RegexLexer): - """ - For the F# language (version 3.0). - - .. 
versionadded:: 1.5 - """ - - name = 'F#' - url = 'https://fsharp.org/' - aliases = ['fsharp', 'f#'] - filenames = ['*.fs', '*.fsi', '*.fsx'] - mimetypes = ['text/x-fsharp'] - - keywords = [ - 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default', - 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else', - 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function', - 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal', - 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable', - 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public', - 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to', - 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when', - 'while', 'with', 'yield!', 'yield', - ] - # Reserved words; cannot hurt to color them as keywords too. - keywords += [ - 'atomic', 'break', 'checked', 'component', 'const', 'constraint', - 'constructor', 'continue', 'eager', 'event', 'external', 'fixed', - 'functor', 'include', 'method', 'mixin', 'object', 'parallel', - 'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait', - 'virtual', 'volatile', - ] - keyopts = [ - '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.', - '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-', - r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]', - '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>', - ] - - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ['and', 'or', 'not'] - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = [ - 'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single', - 'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', - 'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string', - 'list', 'exn', 'obj', 'enum', - ] - - # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or - # http://fsharp.org/about/files/spec.pdf for reference. Good luck. - - tokens = { - 'escape-sequence': [ - (r'\\[\\"\'ntbrafv]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\U[0-9a-fA-F]{8}', String.Escape), - ], - 'root': [ - (r'\s+', Whitespace), - (r'\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b(? and <| are weak - indicators.""" - result = 0 - if '|>' in text: - result += 0.05 - if '<|' in text: - result += 0.05 - - return result - - -class XppLexer(RegexLexer): - - """ - For X++ source code. This is based loosely on the CSharpLexer - - .. 
versionadded:: 2.15 - """ - - name = 'X++' - url = 'https://learn.microsoft.com/en-us/dynamics365/fin-ops-core/dev-itpro/dev-ref/xpp-language-reference' - aliases = ['xpp', 'x++'] - filenames = ['*.xpp'] - - flags = re.MULTILINE - - XPP_CHARS = ('@?(?:_|[^' + - uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' + - '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', - 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'); - # Temporary, see - # https://github.com/thatch/regexlint/pull/49 - XPP_CHARS = XPP_CHARS.replace('\x00', '\x01') - - OPERATORS = ( - '<=', '>=', '+=', '-=', '*=', '/=', '!=', '==', - '&&', '||', '>>', '<<', '++', '--', '+', '-', '*', - '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=', - ) - KEYWORDS = ('abstract','anytype','as','async','asc','at','avg','break','breakpoint','by','byref','case','catch', - 'changecompany','client','container','continue','count','crosscompany','default','delegate', - 'delete_from','desc','display','div','do','edit','else','element','eventhandler','exists','false','final', - 'firstfast','firstonly','firstonly10','firstonly100','firstonly1000','flush','for','forceliterals', - 'forcenestedloop','forceplaceholders','forceselectorder','forupdate','from','group','if','insert_recordset', - 'interface','is','join','like','maxof','minof','mod','new','next','nofetch','notexists','null','optimisticlock','order', - 'outer','pause','pessimisticlock','print','private','protected','public','repeatableread','retry','return', - 'reverse','select','server','setting','static','sum','super','switch','tablelock','this','throw','true','try','ttsabort','ttsbegin', - 'ttscommit','update_recordset','validtimestate','void','where','while','window') - RUNTIME_FUNCTIONS = ('_duration','abs','acos','any2Date','any2Enum','any2Guid','any2Int','any2Int64','any2Real','any2Str','anytodate', - 'anytoenum','anytoguid','anytoint','anytoint64','anytoreal','anytostr','asin','atan','beep','cTerm','char2Num','classIdGet', - 'corrFlagGet','corrFlagSet','cos','cosh','curExt','curUserId','date2Num','date2Str','datetime2Str','dayName','dayOfMth', - 'dayOfWk','dayOfYr','ddb','decRound','dg','dimOf','endMth','enum2str','exp','exp10','fV','fieldId2Name','fieldId2PName', - 'fieldName2Id','frac','funcName','getCurrentPartition','getCurrentPartitionRecId','getPrefix','guid2Str','idg','indexId2Name', - 'indexName2Id','int2Str','int642Str','intvMax','intvName','intvNo','intvNorm','log10','logN','match','max','min','mkDate','mthName', - 'mthOfYr','newGuid','nextMth','nextQtr','nextYr','num2Char','num2Date','num2Str','pmt','power','prevMth','prevQtr','prevYr', - 'prmIsDefault','pt','pv','rate','refPrintAll','round','runAs','sessionId','setPrefix','sin','sinh','sleep','sln','str2Date', - 'str2Datetime','str2Enum','str2Guid','str2Int','str2Int64','str2Num','str2Time','strAlpha','strCmp','strColSeq','strDel', - 'strFind','strFmt','strIns','strKeep','strLTrim','strLen','strLine','strLwr','strNFind','strPoke','strPrompt','strRTrim', - 'strRem','strRep','strScan','strUpr','subStr','syd','systemDateGet','systemDateSet','tableId2Name', - 'tableId2PName','tableName2Id','tan','tanh','term','time2Str','timeNow','today','trunc','typeOf','uint2Str','wkOfYr','year') - COMPILE_FUNCTIONS = ('attributeStr','classNum','classStr','configurationKeyNum','configurationKeyStr','dataEntityDataSourceStr','delegateStr', - 'dimensionHierarchyLevelStr','dimensionHierarchyStr','dimensionReferenceStr','dutyStr','enumCnt','enumLiteralStr','enumNum','enumStr', - 
'extendedTypeNum','extendedTypeStr','fieldNum','fieldPName','fieldStr','formControlStr','formDataFieldStr','formDataSourceStr', - 'formMethodStr','formStr','identifierStr','indexNum','indexStr','licenseCodeNum','licenseCodeStr','literalStr','maxDate','maxInt', - 'measureStr','measurementStr','menuItemActionStr','menuItemDisplayStr','menuItemOutputStr','menuStr','methodStr','minInt','privilegeStr', - 'queryDatasourceStr','queryMethodStr','queryStr','reportStr','resourceStr','roleStr','ssrsReportStr','staticDelegateStr','staticMethodStr', - 'tableCollectionStr','tableFieldGroupStr','tableMethodStr','tableNum','tablePName','tableStaticMethodStr','tableStr','tileStr','varStr', - 'webActionItemStr','webDisplayContentItemStr','webFormStr','webMenuStr','webOutputContentItemStr','webReportStr','webSiteTempStr', - 'webStaticFileStr','webUrlItemStr','webWebPartStr','webletItemStr','webpageDefStr','websiteDefStr','workflowApprovalStr', - 'workflowCategoryStr','workflowTaskStr','workflowTypeStr') - - tokens = {} - - tokens = { - 'root': [ - # method names - (r'(\s*)\b(else|if)\b([^\n])', bygroups(Whitespace, Keyword, using(this))), # ensure that if is not treated like a function - (r'^([ \t]*)((?:' + XPP_CHARS + r'(?:\[\])?\s+)+?)' # return type - r'(' + XPP_CHARS + ')' # method name - r'(\s*)(\()', # signature start - bygroups(Whitespace, using(this), Name.Function, Whitespace, - Punctuation)), - (r'^(\s*)(\[)([^\n]*?)(\])', bygroups(Whitespace, Name.Attribute, Name.Variable.Class, Name.Attribute)), - (r'[^\S\n]+', Whitespace), - (r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation - (r'//[^\n]*?\n', Comment.Single), - (r'/[*][^\n]*?[*]/', Comment.Multiline), - (r'\n', Whitespace), - (words(OPERATORS), Operator), - (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator), - (r'[()\[\];:,.#@]', Punctuation), - (r'[{}]', Punctuation), - (r'@"(""|[^"])*"', String), - (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String), - (r"'\\.'|'[^\\]'", String.Char), - (r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?" 
- r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number), - (words(KEYWORDS, suffix=r'\b'), Keyword), - (r'(boolean|int|int64|str|real|guid|date)\b\??', Keyword.Type), - (r'(class|struct|extends|implements)(\s+)', bygroups(Keyword, Whitespace), 'class'), - (r'('+XPP_CHARS+')(::)', bygroups(Name.Variable.Class, Punctuation)), - (r'(\s*)(\w+)(\s+\w+(,|=)?[^\n]*;)', bygroups(Whitespace, Name.Variable.Class, using(this))), # declaration - # x++ specific function to get field should highlight the classname - (r'(fieldNum\()('+XPP_CHARS+r')(\s*,\s*)('+XPP_CHARS+r')(\s*\))', - bygroups(using(this), Name.Variable.Class, using(this), Name.Property, using(this))), - # x++ specific function to get table should highlight the classname - (r'(tableNum\()('+XPP_CHARS+r')(\s*\))', - bygroups(using(this), Name.Variable.Class, using(this))), - (words(RUNTIME_FUNCTIONS, suffix=r'(?=\()'), Name.Function.Magic), - (words(COMPILE_FUNCTIONS, suffix=r'(?=\()'), Name.Function.Magic), - (XPP_CHARS, Name), - ], - 'class': [ - (XPP_CHARS, Name.Class, '#pop'), - default('#pop'), - ], - 'namespace': [ - (r'(?=\()', Text, '#pop'), # using (resource) - ('(' + XPP_CHARS + r'|\.)+', Name.Namespace, '#pop'), - ] - } diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Freedownloadbluestacksforwindows7withram1gb [NEW].md b/spaces/quidiaMuxgu/Expedit-SAM/Freedownloadbluestacksforwindows7withram1gb [NEW].md deleted file mode 100644 index 277eecabb16ea9a33c07d000ecd646e43c79054a..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Freedownloadbluestacksforwindows7withram1gb [NEW].md +++ /dev/null @@ -1,11 +0,0 @@ -
            -


            -

            freedownloadbluestacksforwindows7withram1gb


            Download >>>>> https://geags.com/2uCqla



            -
            -
            \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Les Mills - BODYCOMBAT 53 Master Class .zip REPACK.md b/spaces/quidiaMuxgu/Expedit-SAM/Les Mills - BODYCOMBAT 53 Master Class .zip REPACK.md deleted file mode 100644 index 79583dfe8bb2deb7ac4d824b0c70d4cb9a6cf6ab..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Les Mills - BODYCOMBAT 53 Master Class .zip REPACK.md +++ /dev/null @@ -1,105 +0,0 @@ -
            -

            Les Mills - BODYCOMBAT 53 Master Class .zip: The Ultimate Workout Experience

            -

            If you are looking for a way to get fit, have fun, and unleash your inner warrior, you need to try Les Mills - BODYCOMBAT 53 Master Class .zip. This is a high-intensity martial arts-inspired workout that will challenge your body and mind. You will learn moves from karate, taekwondo, boxing, muay thai, capoeira, and more. You will burn calories, build strength, improve agility, and boost your confidence.

            -

            What is Les Mills - BODYCOMBAT 53 Master Class .zip?

            -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a digital download of the latest BODYCOMBAT release from Les Mills, the world leaders in group fitness. It contains a video of the master class, which is a full-length workout led by the creators of BODYCOMBAT, Dan Cohen and Rachael Newsham. It also contains a music tracklist, a choreography booklet, and a bonus track.

            -

            Les Mills - BODYCOMBAT 53 Master Class .zip


            Download Zip: https://geags.com/2uCs2U



            -

            Why should you download Les Mills - BODYCOMBAT 53 Master Class .zip?

            -

            There are many reasons why you should download Les Mills - BODYCOMBAT 53 Master Class .zip. Here are some of them:

            -
              -
            • You will get access to the newest and most exciting BODYCOMBAT workout ever.
            • -
            • You will be able to stream or download the master class on any device, anytime, anywhere.
            • -
            • You will be able to follow along with the experts and learn from their tips and cues.
            • -
            • You will be able to enjoy the amazing music and feel the energy of the class.
            • -
            • You will be able to customize your workout by choosing the tracks you want to do.
            • -
            • You will be able to challenge yourself and track your progress with the bonus track.
            • -
            -

            How can you download Les Mills - BODYCOMBAT 53 Master Class .zip?

            -

            Downloading Les Mills - BODYCOMBAT 53 Master Class .zip is easy and fast. All you need to do is follow these steps:

            -
              -
            1. Click on this link: https://bytlly.com/2t8xEz
            2. Enter your email address and payment details.
            3. Confirm your order and receive your download link.
            4. Enjoy your workout!
            -
            What are you waiting for?
            -

            Les Mills - BODYCOMBAT 53 Master Class .zip is the ultimate workout experience for anyone who loves martial arts and fitness. It will help you achieve your goals and have fun at the same time. Don't miss this opportunity to get your hands on this exclusive download. Order now and get ready to unleash your inner warrior!

            -
            What are the benefits of Les Mills - BODYCOMBAT 53 Master Class .zip?
            -

            Les Mills - BODYCOMBAT 53 Master Class .zip is not only a fun and exciting workout, but also a great way to improve your health and fitness. Here are some of the benefits you can expect from doing this workout regularly:

            -
              -
            • You will burn up to 740 calories per hour and boost your metabolism.
            • -
            • You will tone and shape your arms, legs, back, and core muscles.
            • -
            • You will improve your cardiovascular and respiratory system.
            • -
            • You will enhance your coordination, balance, and agility.
            • -
            • You will reduce stress and anxiety levels.
            • -
            • You will increase your self-esteem and confidence.
            • -
            -What are the reviews of Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip has received rave reviews from people who have tried it. Here are some of the testimonials from satisfied customers:

            -
            "I love Les Mills - BODYCOMBAT 53 Master Class .zip! It's the best workout ever! I feel so strong and powerful after doing it. The music is awesome and the instructors are amazing. I highly recommend it to anyone who wants to get fit and have fun." - Sarah
            -
            "Les Mills - BODYCOMBAT 53 Master Class .zip is a game-changer for me. It has helped me lose weight, gain muscle, and improve my mood. I look forward to doing it every day. It's like a therapy session for me. It's worth every penny." - James
            -
            "Les Mills - BODYCOMBAT 53 Master Class .zip is a blast! It's like a party on a mat. I love the variety of moves and styles. It keeps me motivated and challenged. It's the perfect workout for anyone who loves martial arts and music." - Lisa
            -How can you get started with Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip is suitable for anyone who wants to try a martial arts-inspired workout. You don't need any equipment or experience to join the class. You just need a mat, some water, and a lot of enthusiasm. Here are some tips to help you get started:

            -

            -
              -
            • Download Les Mills - BODYCOMBAT 53 Master Class .zip from this link: https://bytlly.com/2t8xEz
            • -
            • Watch the video and read the booklet to familiarize yourself with the moves and the structure of the class.
            • -
            • Choose a time and place where you can do the workout without any distractions.
            • -
            • Wear comfortable clothes and shoes that allow you to move freely.
            • -
            • Warm up before the class and cool down after the class.
            • -
            • Modify the moves according to your fitness level and abilities.
            • -
            • Have fun and enjoy the music and the atmosphere.
            • -

            -FAQs about Les Mills - BODYCOMBAT 53 Master Class .zip -

            You may have some questions about Les Mills - BODYCOMBAT 53 Master Class .zip. Here are some of the most frequently asked questions and their answers:

            -
            -
            How long is Les Mills - BODYCOMBAT 53 Master Class .zip?
            -
            Les Mills - BODYCOMBAT 53 Master Class .zip is about 55 minutes long, including the warm-up and cool-down. The bonus track is about 5 minutes long.
            -
            How often should I do Les Mills - BODYCOMBAT 53 Master Class .zip?
            -
            You can do Les Mills - BODYCOMBAT 53 Master Class .zip as often as you like, depending on your goals and fitness level. However, it is recommended that you do it at least twice a week for optimal results.
            -
            What equipment do I need for Les Mills - BODYCOMBAT 53 Master Class .zip?
            -
            You don't need any equipment for Les Mills - BODYCOMBAT 53 Master Class .zip. You just need a mat, some water, and a lot of enthusiasm.
            -
            Is Les Mills - BODYCOMBAT 53 Master Class .zip suitable for beginners?
            -
            Yes, Les Mills - BODYCOMBAT 53 Master Class .zip is suitable for beginners. You can modify the moves according to your fitness level and abilities. You can also follow the low-impact options if you have any injuries or limitations.
            -
            Where can I get more information about Les Mills - BODYCOMBAT 53 Master Class .zip?
            -
            You can get more information about Les Mills - BODYCOMBAT 53 Master Class .zip from the official website of Les Mills: https://www.lesmills.com/workouts/fitness-classes/bodycombat/. You can also join the online community of BODYCOMBAT fans on Facebook, Instagram, and YouTube.
            -
            -What are the best tips for Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a fun and effective workout that will make you feel like a champion. However, to get the most out of it, you need to follow some tips and tricks. Here are some of the best tips for Les Mills - BODYCOMBAT 53 Master Class .zip:

            -
              -
            • Focus on your technique and form. Make sure you execute each move correctly and safely. Don't sacrifice quality for quantity.
            • -
            • Breathe deeply and rhythmically. Don't hold your breath or hyperventilate. Breathe in through your nose and out through your mouth.
            • -
            • Drink plenty of water before, during, and after the workout. Stay hydrated and avoid dehydration.
            • -
            • Eat a balanced and nutritious diet. Fuel your body with healthy foods that will give you energy and support your recovery.
            • -
            • Rest and recover well. Don't overdo it or push yourself too hard. Give your body time to heal and adapt.
            • -
            -What are the alternatives to Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a great workout that will suit many people. However, if you want to try something different or add some variety to your routine, you can also check out these alternatives:

            -
              -
            • Les Mills - BODYATTACK: A high-energy cardio workout that combines athletic movements with strength and stabilization exercises.
            • -
            • Les Mills - BODYPUMP: A full-body barbell workout that challenges all your major muscle groups.
            • -
            • Les Mills - BODYBALANCE: A yoga-based class that blends elements of tai chi and pilates.
            • -
            • Les Mills - CXWORX: A core-focused workout that targets your abs, back, glutes, and hips.
            • -
            • Les Mills - SH'BAM: A dance-inspired workout that features simple but sassy moves set to a party playlist.
            • -
            -What are the challenges of Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a rewarding workout that will make you feel accomplished and proud. However, it is not a walk in the park. It is a challenging workout that will test your limits and push you out of your comfort zone. Here are some of the challenges of Les Mills - BODYCOMBAT 53 Master Class .zip:

            -
              -
            • It is fast-paced and intense. You will need to keep up with the tempo and the intensity of the class. You will need to move quickly and powerfully.
            • -
            • It is complex and varied. You will need to learn and execute different moves and styles from various martial arts disciplines. You will need to remember the combinations and sequences.
            • -
            • It is demanding and exhausting. You will need to use your whole body and engage all your muscles. You will need to endure the fatigue and soreness.
            • -
            -How can you overcome the challenges of Les Mills - BODYCOMBAT 53 Master Class .zip? -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a tough workout that will challenge you physically and mentally. However, you can overcome these challenges and enjoy the workout with some strategies. Here are some ways to overcome the challenges of Les Mills - BODYCOMBAT 53 Master Class .zip:

            -
              -
            • Start slow and build up gradually. Don't try to do everything at once. Start with the basics and progress to the advanced moves as you get more comfortable and confident.
            • -
            • Follow the instructors and cues. Don't worry if you make mistakes or miss some steps. Just follow the instructors and their cues. They will guide you through the class and help you improve your technique and form.
            • -
            • Listen to your body and take breaks. Don't push yourself too hard or ignore any signs of discomfort or pain. Listen to your body and take breaks when you need to. Drink water, stretch, and rest.
            • -
            -Conclusion -

            Les Mills - BODYCOMBAT 53 Master Class .zip is a revolutionary workout that will transform your body and mind. It will help you burn calories, tone muscles, improve health, and unleash your inner warrior. It will also make you feel happy, energized, and confident. If you are looking for a workout that is fun, effective, and empowering, you need to download Les Mills - BODYCOMBAT 53 Master Class .zip today. You won't regret it!

            -
            -
            \ No newline at end of file diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/models/gnn/__init__.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/models/gnn/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/interface/model_loader.py b/spaces/radames/UserControllableLT-Latent-Transformer/interface/model_loader.py deleted file mode 100644 index afc6050d88088a2b1f1086afc790b35554e6173b..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/interface/model_loader.py +++ /dev/null @@ -1,191 +0,0 @@ -import os -from argparse import Namespace -import numpy as np -import torch - -from models.StyleGANControler import StyleGANControler - - -class Model: - def __init__( - self, checkpoint_path, truncation=0.5, use_average_code_as_input=False - ): - self.truncation = truncation - self.use_average_code_as_input = use_average_code_as_input - ckpt = torch.load(checkpoint_path, map_location="cpu") - opts = ckpt["opts"] - opts["checkpoint_path"] = checkpoint_path - self.opts = Namespace(**ckpt["opts"]) - self.net = StyleGANControler(self.opts) - self.net.eval() - self.net.cuda() - self.target_layers = [0, 1, 2, 3, 4, 5] - - def random_sample(self): - z1 = torch.randn(1, 512).to("cuda") - x1, w1, f1 = self.net.decoder( - [z1], - input_is_latent=False, - randomize_noise=False, - return_feature_map=True, - return_latents=True, - truncation=self.truncation, - truncation_latent=self.net.latent_avg[0], - ) - w1_initial = w1.clone() - x1 = self.net.face_pool(x1) - image = ( - ((x1.detach()[0].permute(1, 2, 0) + 1.0) * 127.5).cpu().numpy()[:, :, ::-1] - ) - return ( - image, - { - "w1": w1.cpu().detach().numpy(), - "w1_initial": w1_initial.cpu().detach().numpy(), - }, - ) # return latent vector along with the image - - def latents_to_tensor(self, latents): - w1 = latents["w1"] - w1_initial = latents["w1_initial"] - - w1 = torch.tensor(w1).to("cuda") - w1_initial = torch.tensor(w1_initial).to("cuda") - - x1, w1, f1 = self.net.decoder( - [w1], - input_is_latent=True, - randomize_noise=False, - return_feature_map=True, - return_latents=True, - ) - x1, w1_initial, f1 = self.net.decoder( - [w1_initial], - input_is_latent=True, - randomize_noise=False, - return_feature_map=True, - return_latents=True, - ) - - return (w1, w1_initial, f1) - - def transform( - self, - latents, - dz, - dxy, - sxsy=[0, 0], - stop_points=[], - zoom_in=False, - zoom_out=False, - ): - w1, w1_initial, f1 = self.latents_to_tensor(latents) - w1 = w1_initial.clone() - - dxyz = np.array([dxy[0], dxy[1], dz], dtype=np.float32) - dxy_norm = np.linalg.norm(dxyz[:2], ord=2) - epsilon = 1e-8 - dxy_norm = dxy_norm + epsilon - dxyz[:2] = dxyz[:2] / dxy_norm - vec_num = dxy_norm / 10 - - x = torch.from_numpy(np.array([[dxyz]], dtype=np.float32)).cuda() - f1 = torch.nn.functional.interpolate(f1, (256, 256)) - y = f1[:, :, sxsy[1], sxsy[0]].unsqueeze(0) - - if len(stop_points) > 0: - x = torch.cat( - [x, torch.zeros(x.shape[0], len(stop_points), x.shape[2]).cuda()], dim=1 - ) - tmp = [] - for sp in stop_points: - tmp.append(f1[:, :, sp[1], sp[0]].unsqueeze(1)) - y = torch.cat([y, torch.cat(tmp, dim=1)], dim=1) - - if not self.use_average_code_as_input: - w_hat = self.net.encoder( - w1[:, self.target_layers].detach(), - x.detach(), - y.detach(), - alpha=vec_num, - ) - w1 = w1.clone() - w1[:, self.target_layers] 
= w_hat - else: - w_hat = self.net.encoder( - self.net.latent_avg.unsqueeze(0)[:, self.target_layers].detach(), - x.detach(), - y.detach(), - alpha=vec_num, - ) - w1 = w1.clone() - w1[:, self.target_layers] = ( - w1.clone()[:, self.target_layers] - + w_hat - - self.net.latent_avg.unsqueeze(0)[:, self.target_layers] - ) - - x1, _ = self.net.decoder([w1], input_is_latent=True, randomize_noise=False) - - x1 = self.net.face_pool(x1) - result = ( - ((x1.detach()[0].permute(1, 2, 0) + 1.0) * 127.5).cpu().numpy()[:, :, ::-1] - ) - return ( - result, - { - "w1": w1.cpu().detach().numpy(), - "w1_initial": w1_initial.cpu().detach().numpy(), - }, - ) - - def change_style(self, latents): - w1, w1_initial, f1 = self.latents_to_tensor(latents) - w1 = w1_initial.clone() - - z1 = torch.randn(1, 512).to("cuda") - x1, w2 = self.net.decoder( - [z1], - input_is_latent=False, - randomize_noise=False, - return_latents=True, - truncation=self.truncation, - truncation_latent=self.net.latent_avg[0], - ) - w1[:, 6:] = w2.detach()[:, 0] - x1, w1_new = self.net.decoder( - [w1], - input_is_latent=True, - randomize_noise=False, - return_latents=True, - ) - result = ( - ((x1.detach()[0].permute(1, 2, 0) + 1.0) * 127.5).cpu().numpy()[:, :, ::-1] - ) - return ( - result, - { - "w1": w1_new.cpu().detach().numpy(), - "w1_initial": w1_initial.cpu().detach().numpy(), - }, - ) - - def reset(self, latents): - w1, w1_initial, f1 = self.latents_to_tensor(latents) - x1, w1_new, f1 = self.net.decoder( - [w1_initial], - input_is_latent=True, - randomize_noise=False, - return_feature_map=True, - return_latents=True, - ) - result = ( - ((x1.detach()[0].permute(1, 2, 0) + 1.0) * 127.5).cpu().numpy()[:, :, ::-1] - ) - return ( - result, - { - "w1": w1_new.cpu().detach().numpy(), - "w1_initial": w1_new.cpu().detach().numpy(), - }, - ) diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Geovision Gv 650 800 S V3.52 Drivers.zip 40 BEST.md b/spaces/raedeXanto/academic-chatgpt-beta/Geovision Gv 650 800 S V3.52 Drivers.zip 40 BEST.md deleted file mode 100644 index 858e8a62c8cb17c2e81ecdfc340d08fbf92dc09b..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Geovision Gv 650 800 S V3.52 Drivers.zip 40 BEST.md +++ /dev/null @@ -1,79 +0,0 @@ -
            -

            Geovision Gv 650 800 S V3.52 Drivers.zip 40: What You Need to Know

            -

            Introduction

            -

            If you are looking for a reliable and versatile video capture card for your digital video surveillance system, you might have come across Geovision Gv 650 800 S V3.52 Drivers.zip 40. This is a zip file that contains the drivers and software for the Geovision GV-650/800(S) V3.52 video capture card, which is compatible with Windows 7 32-bit operating system.

            -

            But what is Geovision Gv 650 800 S V3.52 Drivers.zip 40 exactly? Why do you need it? How do you install it on your computer? And how do you use it for your digital video surveillance system? In this article, we will answer these questions and more, so that you can make the most of your Geovision video capture card.

            -

            Geovision Gv 650 800 S V3.52 Drivers.zip 40


            Download: https://tinourl.com/2uL5xb



            -

            How to install Geovision Gv 650 800 S V3.52 Drivers.zip 40 on Windows 7 32-bit

            -

            The first thing you need to do is to download Geovision Gv 650 800 S V3.52 Drivers.zip 40 from the official website of GeoVision Inc., which is a leading digital video surveillance system provider. You can find the download link here: http://geovisioncn.com/english/4_2_8000.htm. The zip file is about 1.5 GB in size, so make sure you have enough space on your hard drive and a stable internet connection.
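            Since the file is so large, it is safer to stream the download to disk rather than hold it in memory. Below is a minimal Python sketch using the third-party requests library (an assumed dependency; any HTTP client works). The direct file URL is hypothetical, because the page above is a download page rather than a link to the file itself:

```python
import requests  # assumed dependency: pip install requests

# Hypothetical direct URL -- locate the real link on the download page above.
url = "http://geovisioncn.com/english/GV-8.0.zip"
out = "Geovision Gv 650 800 S V3.52 Drivers.zip"

# Stream in 1 MiB chunks so the ~1.5 GB file never has to fit in memory.
with requests.get(url, stream=True, timeout=60) as resp:
    resp.raise_for_status()
    with open(out, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            fh.write(chunk)
```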

            -

            Once you have downloaded the zip file, you need to extract it to a folder on your computer. You can use any zip extractor software, such as WinZip or WinRAR, to do this. After extracting the zip file, you will see a folder named "GV-8.0" that contains several subfolders and files.
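            If you prefer to script the extraction instead of using WinZip or WinRAR, Python's standard-library zipfile module can do the same job. A minimal sketch, assuming the archive sits in your Downloads folder (the paths are illustrative; only the "GV-8.0" folder name comes from this article):

```python
import zipfile
from pathlib import Path

# Assumed locations -- adjust to wherever you saved the download.
archive = Path.home() / "Downloads" / "Geovision Gv 650 800 S V3.52 Drivers.zip"
dest = Path.home() / "Downloads" / "geovision_drivers"

dest.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(archive) as zf:
    zf.extractall(dest)  # should produce the "GV-8.0" folder described here

print("Extracted to:", dest / "GV-8.0")
```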

            -

            The next step is to run the setup.exe file that is located in the "GV-8.0" folder. This will launch the installation wizard that will guide you through the installation process. You will need to accept the license agreement, choose the destination folder, select the components to install, and configure some settings.

            -

            The installation wizard will also detect your Geovision video capture card and install the appropriate driver for it. Version 8.0 supports only the following GV video capture cards: GV-250 All Series, GV-600(S) V3.21/V3.20, GV-650(S) V3.31/V3.30, GV-800(S) V3.31/V3.30, GV-1120 V1.02, GV-1240 V1.02, GV-1480 V1.02, GV800-4A V3.10. If you have a different model of Geovision video capture card, you will need to find another driver that matches your card and operating system.

            -

            After installing all the components and drivers, you will need to restart your computer for the changes to take effect. Once your computer has rebooted, you will be able to use your Geovision video capture card with Windows 7.

            -

            How to use Geovision Gv 650 800 S V3.52 Drivers.zip 40 for digital video surveillance

            -

            Now that you have installed Geovision Gv 650 800 S V3.52 Drivers.zip 40 on your computer, you can start using it for your digital video surveillance system. The zip file contains several applications that allow you to monitor, record, play back, and manage your video feeds from your cameras.

            -

            One of the main applications is the Main System, which is also known as Multicam System or DVR System. This application allows you to view live video from up to 16 cameras on one screen, record video based on various settings and schedules, play back recorded video with advanced search functions, set up motion detection, object monitoring, privacy mask, scene change detection, and other features. You can also control PTZ domes, I/O devices, and spot monitors from this application.

            -

            The Main System also supports POS Live Viewer, which is a feature that allows you to integrate your point-of-sale (POS) system with your digital video surveillance system. This way, you can monitor transactions from your POS terminals along with live video from your cameras. You can also search for POS data in recorded video and export them as text files or images. This feature is useful for preventing fraud, theft, and other irregularities in your business.

            -

            Another application that comes with Geovision Gv 650 800 S V3.52 Drivers.zip 40 is ViewLog, which is also known as EZViewLog or Playback System. This application allows you to play back recorded video from multiple channels simultaneously on one screen. You can also access a remote ViewLog through TCP/IP, which means you can view recorded video from another computer that has Geovision installed. You can use various search functions, such as date/time, event list, object index, and POS data, to find the video clips you want. You can also back up video files to CD/DVD or USB drives from this application.

            -

            Besides these two applications, there are also several remote applications that allow you to monitor and control your digital video surveillance system from anywhere via internet or mobile network. Some of these applications are:

            -
              -
            • Center V2: This application allows you to receive live video and audio from up to 500 channels of remote DVR systems on one screen. You can also control PTZ domes, I/O devices, and spot monitors of remote DVR systems from this application. You can also set up event notifications and E-Map features for easy management of multiple sites.
            • -
            • WebCam: This application allows you to view live video and audio from remote DVR systems via web browser. You can also control PTZ domes and I/O devices of remote DVR systems from this application. You can also remotely configure video attributes, recording settings, and I/O monitoring settings of remote DVR systems from this application.
            • -
            • Control Center: This application allows you to monitor and control multiple Center V2 servers and DVRs on one screen. You can also control PTZ domes, I/O devices, and spot monitors of remote DVRs from this application. You can also set up event notifications and E-Map features for easy management of multiple sites.
            • -
            • VSM: This application allows you to monitor live video and audio from up to 64 channels of remote DVR systems on one screen. You can also control PTZ domes and I/O devices of remote DVR systems from this application. You can also set up event notifications and E-Map features for easy management of multiple sites.
            • -
            • WebCam Server: This application allows you to stream live video and audio from your DVR system to the internet. You can also control PTZ domes and I/O devices of your DVR system from the internet. You can also remotely configure video attributes, recording settings, and I/O monitoring settings of your DVR system from the internet.
            • -
            • Authentication Server: This application allows you to set up user accounts and passwords for accessing your DVR system via Center V2, VSM, WebCam Server, or Remote Playback Client. You can also assign different access rights and privileges to different users or groups.
            • -
            • Remote Playback Client: This application allows you to play back recorded video from remote DVR systems via internet. You can also search for video clips by date/time, event list, object index, and POS data. You can also back up video files to CD/DVD or USB drives from this application.
            • -
            • Dispatch Server: This application allows you to distribute live video and audio from your DVR system to multiple Center V2 servers or VSM clients via internet. You can also set up E-Mail and SMS alerts for events that occur on your DVR system or Center V2 servers.
            • -
            • Mobile Server: This application allows you to stream live video and audio from your DVR system to 3G mobile phones via internet. You can also control PTZ domes and I/O devices of your DVR system from 3G mobile phones.
            • -
            -

            Conclusion

            -

            In conclusion, Geovision Gv 650 800 S V3.52 Drivers.zip 40 is a zip file that contains the drivers and software for the Geovision GV-650/800(S) V3.52 video capture card, which is compatible with Windows 7 32-bit operating system. By installing this zip file on your computer, you can use your Geovision video capture card for your digital video surveillance system. You can also use various applications that come with the zip file to monitor, record, playback, and manage your video feeds from your cameras. Geovision Gv 650 800 S V3.52 Drivers.zip 40 is a comprehensive solution for your digital video surveillance needs.

            -

            FAQs

            -
              -
            1. What are the system requirements for installing Geovision Gv 650 800 S V3.52 Drivers.zip 40?

              The minimum system requirements are: Windows 7 32-bit operating system; Pentium 4 CPU 3.0 GHz or higher; 1 GB RAM or higher; AGP or PCI-Express graphic card with DirectDraw overlay support; DirectX 9.0c or above; Sound card (optional); Internet Explorer 7.x or above (for WebCam Server); Microsoft .NET Framework 4.0 (for Mobile Server); Microsoft Office Outlook (for Dispatch Server); CD/DVD Writer (optional); USB port (optional); GV-650/800(S) V3.52 video capture card.

              -
            2. How do I update Geovision Gv 650 800 S V3.52 Drivers.zip 40?

              You can check for updates on the official website of GeoVision Inc., which is http://www.geovision.com.tw. You can also use the Live Update function in the Main System application to check for updates automatically.

              -
            3. How do I uninstall Geovision Gv 650 800 S V3.52 Drivers.zip 40?

              You can uninstall Geovision Gv 650 800 S V3.52 Drivers.zip 40 by using the Add/Remove Programs function in the Control Panel of Windows 7. You can also use the Uninstall function in the Start menu of Geovision.

              -
            4. How do I contact GeoVision Inc. for technical support?

              You can contact GeoVision Inc. by using the following methods: Phone: +886-2-8797-8377; Fax: +886-2-8797-8335; E-mail: support@geovision.com.tw; Website: http://www.geovision.com.tw.

              -
            5. How do I find more information about Geovision Gv 650 800 S V3.52 Drivers.zip 40?

              You can find more information about Geovision Gv 650 800 S V3.52 Drivers.zip 40 by reading the user manual that comes with the zip file. You can also visit the official website of GeoVision Inc., which is http://www.geovision.com.tw, for more resources and tutorials.

              -
            -

            -
            -
            \ No newline at end of file diff --git a/spaces/randomarnab/Img_caption_project_using_ViT_GPT2/app.py b/spaces/randomarnab/Img_caption_project_using_ViT_GPT2/app.py deleted file mode 100644 index bec910bbdea20af58df3fbdf25d088e65cd39d56..0000000000000000000000000000000000000000 --- a/spaces/randomarnab/Img_caption_project_using_ViT_GPT2/app.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -"""Image Captioning with ViT+GPT2 -Automatically generated by Colaboratory. -Original file is located at - https://colab.research.google.com/drive/1P3O0gO5AUqSmM8rE9dxy2tXJ-9jkhxHz -""" - -#! pip install transformers -q - -#! pip install gradio -q - -from PIL import Image -from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast -import requests - -model = VisionEncoderDecoderModel.from_pretrained("sachin/vit2distilgpt2") - -vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") - -tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2") - -# url = 'https://d2gp644kobdlm6.cloudfront.net/wp-content/uploads/2016/06/bigstock-Shocked-and-surprised-boy-on-t-113798588-300x212.jpg' - -# with Image.open(requests.get(url, stream=True).raw) as img: -# pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values - -#encoder_outputs = model.generate(pixel_values.to('cpu'),num_beams=5) - -#generated_sentences = tokenizer.batch_decode(encoder_outputs, skip_special_tokens=True) - -#generated_sentences - -#naive text processing -#generated_sentences[0].split('.')[0] - -# inference function - -def vit2distilgpt2(img): - pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values - encoder_outputs = generated_ids = model.generate(pixel_values.to('cpu'),num_beams=5) - generated_sentences = tokenizer.batch_decode(encoder_outputs, skip_special_tokens=True) - - return(generated_sentences[0].split('.')[0]) - -#!wget https://media.glamour.com/photos/5f171c4fd35176eaedb36823/master/w_2560%2Cc_limit/bike.jpg - -import gradio as gr - -inputs = [ - gr.inputs.Image(type="pil", label="Original Image") -] - -outputs = [ - gr.outputs.Textbox(label = 'Caption') -] - -title = "Image Captioning using ViT + GPT2" -description = "ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO Dataset was used for training. This image captioning model might have some biases that we couldn't figure during our stress testing, so if you find any bias (gender, race and so on) please use `Flag` button to flag the image with bias" -article = " Model Repo on Hugging Face Model Hub" -examples = [ - ["people-walking-street-pedestrian-crossing-traffic-light-city.jpeg"], - ["elonmusk.jpeg"] - -] - -gr.Interface( - vit2distilgpt2, - inputs, - outputs, - title=title, - description=description, - article=article, - examples=examples, - theme="huggingface", -).launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Premiere Pro CC 2015 V9.0 Crack Download Pc [TOP].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Premiere Pro CC 2015 V9.0 Crack Download Pc [TOP].md deleted file mode 100644 index 54773f0b79b9d1e2a8c17ebac96f27d874a07886..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Premiere Pro CC 2015 V9.0 Crack Download Pc [TOP].md +++ /dev/null @@ -1,10 +0,0 @@ -

            Adobe Premiere Pro CC 2015 v9.0 Crack download pc


            DOWNLOAD ->>->>->> https://urlgoal.com/2uCLFX



            -
            -adobe premiere pro cc 2015 v9.0 pc + keygen + torrent -by admin | March 2, 2015 -As a rule, these files are always close at hand; in this case, all you need to do is download the program. -Over the past few days I have seen a lot of people trying to search for something on Google and ending up with nothing. The problem many of them have is that they are looking for a way to download Adobe Photoshop CS6 for free.
            -
            -
            -

diff --git a/spaces/reha/Stick_Tech/inference/slicer.py b/spaces/reha/Stick_Tech/inference/slicer.py deleted file mode 100644 index 35a888b906e7df8634cfdcec914f650c6cefd26a..0000000000000000000000000000000000000000 --- a/spaces/reha/Stick_Tech/inference/slicer.py +++ /dev/null @@ -1,158 +0,0 @@ -import time - -import numpy as np -import torch -import torchaudio -from scipy.ndimage import maximum_filter1d, uniform_filter1d - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' took %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -# @timeit -def _window_maximum(arr, win_sz): - return maximum_filter1d(arr, size=win_sz)[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1] - - -# @timeit -def _window_rms(arr, win_sz): - filtered = np.sqrt(uniform_filter1d(np.power(arr, 2), win_sz) - np.power(uniform_filter1d(arr, win_sz), 2)) - return filtered[win_sz // 2: win_sz // 2 + arr.shape[0] - win_sz + 1] - - -def level2db(levels, eps=1e-12): - return 20 * np.log10(np.clip(levels, a_min=eps, a_max=1)) - - -def _apply_slice(audio, begin, end): - if len(audio.shape) > 1: - return audio[:, begin: end] - else: - return audio[begin: end] - - -class Slicer: - def __init__(self, - sr: int, - db_threshold: float = -40, - min_length: int = 5000, - win_l: int = 300, - win_s: int = 20, - max_silence_kept: int = 500): - self.db_threshold = db_threshold - self.min_samples = round(sr * min_length / 1000) - self.win_ln = round(sr * win_l / 1000) - self.win_sn = round(sr * win_s / 1000) - self.max_silence = round(sr * max_silence_kept / 1000) - if not self.min_samples >= self.win_ln >= self.win_sn: - raise ValueError('The following condition must be satisfied: min_length >= win_l >= win_s') - if not self.max_silence >= self.win_sn: - raise ValueError('The following condition must be satisfied: max_silence_kept >= win_s') - - @timeit - def slice(self, audio): - samples = audio - if samples.shape[0] <= self.min_samples: - return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}} - # get absolute amplitudes - abs_amp = np.abs(samples - np.mean(samples)) - # calculate local maximum with large window - win_max_db = level2db(_window_maximum(abs_amp, win_sz=self.win_ln)) - sil_tags = [] - left = right = 0 - while right < win_max_db.shape[0]: - if win_max_db[right] < self.db_threshold: - right += 1 - elif left == right: - left += 1 - right += 1 - else: - if left == 0: - split_loc_l = left - else: - sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2) - rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn)) - split_win_l = left + np.argmin(rms_db_left) - split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn]) - if len(sil_tags) != 0 and split_loc_l - sil_tags[-1][1] < self.min_samples and right < win_max_db.shape[0] - 1: - right += 1 - left = right - continue - if right == win_max_db.shape[0] - 1: - split_loc_r = right + self.win_ln - else: - sil_right_n = min(self.max_silence, (right + self.win_ln - left) // 2) - rms_db_right = level2db(_window_rms(samples[right + self.win_ln - sil_right_n: right + self.win_ln], - win_sz=self.win_sn)) - split_win_r = right + self.win_ln - sil_right_n + np.argmin(rms_db_right) - split_loc_r = split_win_r + np.argmin(abs_amp[split_win_r: split_win_r + self.win_sn]) - sil_tags.append((split_loc_l, split_loc_r)) - right += 1 - left = right - if left != right: - sil_left_n = min(self.max_silence, (right + self.win_ln - left) // 2) - rms_db_left = level2db(_window_rms(samples[left: left + sil_left_n], win_sz=self.win_sn)) - split_win_l = left + np.argmin(rms_db_left) - split_loc_l = split_win_l + np.argmin(abs_amp[split_win_l: split_win_l + self.win_sn]) - sil_tags.append((split_loc_l, samples.shape[0])) - if len(sil_tags) == 0: - return {"0": {"slice": False, "split_time": f"0,{len(audio)}"}} - else: - chunks = [] - # the first silence does not start at the very beginning, so prepend the leading voiced chunk - if sil_tags[0][0]: - chunks.append({"slice": False, "split_time": f"0,{sil_tags[0][0]}"}) - for i in range(0, len(sil_tags)): - # mark the voiced chunk (skipping the first one) - if i: - chunks.append({"slice": False, "split_time": f"{sil_tags[i - 1][1]},{sil_tags[i][0]}"}) - # mark every silent chunk - chunks.append({"slice": True, "split_time": f"{sil_tags[i][0]},{sil_tags[i][1]}"}) - # the last silence does not reach the end, so append the trailing voiced chunk - if sil_tags[-1][1] != len(audio): - chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1]},{len(audio)}"}) - chunk_dict = {} - for i in range(len(chunks)): - chunk_dict[str(i)] = chunks[i] - return chunk_dict - - -def cut(audio_path, db_thresh=-30, min_len=5000, win_l=300, win_s=20, max_sil_kept=500): - audio, sr = torchaudio.load(audio_path) - if len(audio.shape) == 2 and audio.shape[1] >= 2: - audio = torch.mean(audio, dim=0).unsqueeze(0) - audio = audio.cpu().numpy()[0] - - slicer = Slicer( - sr=sr, - db_threshold=db_thresh, - min_length=min_len, - win_l=win_l, - win_s=win_s, - max_silence_kept=max_sil_kept - ) - chunks = slicer.slice(audio) - return chunks - - -def chunks2audio(audio_path, chunks): - chunks = dict(chunks) - audio, sr = torchaudio.load(audio_path) - if len(audio.shape) == 2 and audio.shape[1] >= 2: - audio = torch.mean(audio, dim=0).unsqueeze(0) - audio = audio.cpu().numpy()[0] - result = [] - for k, v in chunks.items(): - tag = v["split_time"].split(",") - result.append((v["slice"], audio[int(tag[0]):int(tag[1])])) - return result, sr - - diff --git a/spaces/renumics/cifar100-sliceguard-demo/Dockerfile b/spaces/renumics/cifar100-sliceguard-demo/Dockerfile deleted file mode 100644 index 64c971eb000f4e522005eefff5e05c6840282000..0000000000000000000000000000000000000000 --- a/spaces/renumics/cifar100-sliceguard-demo/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM python:3.9 - -WORKDIR /code -ENV HOME=/code -COPY ./renumics_spotlight-1.3.0.post9+b32d1a8-py3-none-any.whl /code/renumics_spotlight-1.3.0.post9+b32d1a8-py3-none-any.whl -# RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN apt-get update && apt-get install -y curl -RUN pip install pip -U - -RUN pip install renumics_spotlight-1.3.0.post9+b32d1a8-py3-none-any.whl - - -RUN pip install pyarrow datasets cleanvision - -COPY . .
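# NOTE: Hugging Face Spaces typically runs this container as a non-root user,
# which is presumably why HOME points at /code and why the cache directory
# below is pre-created and made world-writable before startup.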
-RUN mkdir -p /code/.cache -RUN chmod -R 777 /code -RUN python prepare.py -CMD ["python", "run.py"] diff --git a/spaces/riccorl/relik-entity-linking/relik/reader/pytorch_modules/optim/__init__.py b/spaces/riccorl/relik-entity-linking/relik/reader/pytorch_modules/optim/__init__.py deleted file mode 100644 index 369091133267cfa05240306fbfe5ea3b537d5d9c..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/reader/pytorch_modules/optim/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from relik.reader.pytorch_modules.optim.adamw_with_warmup import ( - AdamWWithWarmupOptimizer, -) -from relik.reader.pytorch_modules.optim.layer_wise_lr_decay import ( - LayerWiseLRDecayOptimizer, -) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/post_processing/matrix_nms.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/post_processing/matrix_nms.py deleted file mode 100644 index 9dc8c4f74e28127fb69ccc684f0bdb2bd3943b20..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/post_processing/matrix_nms.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def mask_matrix_nms(masks, - labels, - scores, - filter_thr=-1, - nms_pre=-1, - max_num=-1, - kernel='gaussian', - sigma=2.0, - mask_area=None): - """Matrix NMS for multi-class masks. - - Args: - masks (Tensor): Has shape (num_instances, h, w) - labels (Tensor): Labels of corresponding masks, - has shape (num_instances,). - scores (Tensor): Mask scores of corresponding masks, - has shape (num_instances). - filter_thr (float): Score threshold to filter the masks - after matrix nms. Default: -1, which means do not - use filter_thr. - nms_pre (int): The max number of instances to do the matrix nms. - Default: -1, which means do not use nms_pre. - max_num (int, optional): If there are more than max_num masks after - matrix, only top max_num will be kept. Default: -1, which means - do not use max_num. - kernel (str): 'linear' or 'gaussian'. - sigma (float): std in gaussian method. - mask_area (Tensor): The sum of seg_masks. - - Returns: - tuple(Tensor): Processed mask results. - - - scores (Tensor): Updated scores, has shape (n,). - - labels (Tensor): Remained labels, has shape (n,). - - masks (Tensor): Remained masks, has shape (n, w, h). - - keep_inds (Tensor): The indices number of - the remaining mask in the input mask, has shape (n,). - """ - assert len(labels) == len(masks) == len(scores) - if len(labels) == 0: - return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( - 0, *masks.shape[-2:]), labels.new_zeros(0) - if mask_area is None: - mask_area = masks.sum((1, 2)).float() - else: - assert len(masks) == len(mask_area) - - # sort and keep top nms_pre - scores, sort_inds = torch.sort(scores, descending=True) - - keep_inds = sort_inds - if nms_pre > 0 and len(sort_inds) > nms_pre: - sort_inds = sort_inds[:nms_pre] - keep_inds = keep_inds[:nms_pre] - scores = scores[:nms_pre] - masks = masks[sort_inds] - mask_area = mask_area[sort_inds] - labels = labels[sort_inds] - - num_masks = len(labels) - flatten_masks = masks.reshape(num_masks, -1).float() - # inter. - inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0)) - expanded_mask_area = mask_area.expand(num_masks, num_masks) - # Upper triangle iou matrix. - iou_matrix = (inter_matrix / - (expanded_mask_area + expanded_mask_area.transpose(1, 0) - - inter_matrix)).triu(diagonal=1) - # label_specific matrix. 
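# The block below is the heart of Matrix NMS: instead of suppressing masks
# sequentially as in classic NMS, every score is decayed in parallel by its
# overlap with all higher-scoring masks of the same class. The upper-triangular
# (.triu(diagonal=1)) IoU and label matrices keep each (higher score, lower
# score) pair in one direction only, and the per-column reduction further down
# picks the strongest suppressor for each mask.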
- expanded_labels = labels.expand(num_masks, num_masks) - # Upper triangle label matrix. - label_matrix = (expanded_labels == expanded_labels.transpose( - 1, 0)).triu(diagonal=1) - - # IoU compensation - compensate_iou, _ = (iou_matrix * label_matrix).max(0) - compensate_iou = compensate_iou.expand(num_masks, - num_masks).transpose(1, 0) - - # IoU decay - decay_iou = iou_matrix * label_matrix - - # Calculate the decay_coefficient - if kernel == 'gaussian': - decay_matrix = torch.exp(-1 * sigma * (decay_iou**2)) - compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2)) - decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0) - elif kernel == 'linear': - decay_matrix = (1 - decay_iou) / (1 - compensate_iou) - decay_coefficient, _ = decay_matrix.min(0) - else: - raise NotImplementedError( - f'{kernel} kernel is not supported in matrix nms!') - # update the score. - scores = scores * decay_coefficient - - if filter_thr > 0: - keep = scores >= filter_thr - keep_inds = keep_inds[keep] - if not keep.any(): - return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( - 0, *masks.shape[-2:]), labels.new_zeros(0) - masks = masks[keep] - scores = scores[keep] - labels = labels[keep] - - # sort and keep top max_num - scores, sort_inds = torch.sort(scores, descending=True) - keep_inds = keep_inds[sort_inds] - if max_num > 0 and len(sort_inds) > max_num: - sort_inds = sort_inds[:max_num] - keep_inds = keep_inds[:max_num] - scores = scores[:max_num] - masks = masks[sort_inds] - labels = labels[sort_inds] - - return scores, labels, masks, keep_inds diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/detr.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/detr.py deleted file mode 100644 index 06d76913be64b98e3a497c043cf71c7d2d4491ae..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/detr.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class DETR(SingleStageDetector): - r"""Implementation of `DETR: End-to-End Object Detection with - Transformers `_""" - - def __init__(self, - backbone, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - # over-write `forward_dummy` because: - # the forward of bbox_head requires img_metas - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - warnings.warn('Warning! MultiheadAttention in DETR does not ' - 'support flops computation! Do not use the ' - 'results in your papers!') - - batch_size, _, height, width = img.shape - dummy_img_metas = [ - dict( - batch_input_shape=(height, width), - img_shape=(height, width, 3)) for _ in range(batch_size) - ] - x = self.extract_feat(img) - outs = self.bbox_head(x, dummy_img_metas) - return outs - - # over-write `onnx_export` because: - # (1) the forward of bbox_head requires img_metas - # (2) the different behavior (e.g. construction of `masks`) between - # torch and ONNX model, during the forward of bbox_head - def onnx_export(self, img, img_metas): - """Test function for exporting to ONNX, without test time augmentation. - - Args: - img (torch.Tensor): input images. 
- img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - x = self.extract_feat(img) - # forward of this head requires img_metas - outs = self.bbox_head.forward_onnx(x, img_metas) - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - - det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) - - return det_bboxes, det_labels diff --git a/spaces/rorallitri/biomedical-language-models/logs/Cif Usb Pc Camera Dc 2110 Driver __LINK__.md b/spaces/rorallitri/biomedical-language-models/logs/Cif Usb Pc Camera Dc 2110 Driver __LINK__.md deleted file mode 100644 index 31edadbc9de204d2f307b40ef0c2fe3577764d07..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Cif Usb Pc Camera Dc 2110 Driver __LINK__.md +++ /dev/null @@ -1,46 +0,0 @@ -

            cif usb pc camera dc 2110 driver


            Download Zip ››››› https://tinurll.com/2uzm39



            -
            -The USB Camera (also known as USB webcam) is a basic integrated device that contains an array of video and still imaging options, that can be connected directly to a USB port on a computer. It consists of a miniature digital camera sensor, an LED light source, image processing hardware, microcontrollers, and firmware. (As a matter of fact, any gadget that can be considered a webcam and can be connected to a USB port can be called a USB webcam.) It works by capturing images from the source and sending them over the Internet via a standard web browser. The advantages of the USB Camera are, but not limited to, the quickness and simplicity to set up, the ability to connect to any USB port (not just USB 2.0 ports), a wide variety of options, and a possibility to view the pictures remotely through a web browser. We have listed some of the most common and useful USB Cameras below. - -The Camera also has two drivers for Windows 7 or later operating systems: dc2110.sys and vc2021.sys. These two are what Windows 7 and later use to capture and decode video data. - -Use this guide to find the latest version of the dc2110.sys and vc2021.sys drivers for your computer, by using Windows 7 or later versions (Build 6100). If you are not sure what your Operating System is, or have not installed Windows 7 or later on your computer, please refer to the System Requirements section of this guide. - -Windows 7 or later: - -- 64 bit Windows 7 (Build 6100) - -- Windows 8 or later - -Windows 8 or later (Build 8100) - -- 64 bit Windows 8 - -- Windows 8.1 - -- Windows 10 - -Windows 10 (Build 1607) - -- 64 bit Windows 10 - -- Windows 10 mobile - -DC2110 Driver - 64 Bit Windows 7 or later - -Description - -The Camera is tested using the latest versions of the Windows 7 and Windows 8 drivers for the DC2110. Windows 10 is not officially supported, as the Camera is not tested using the latest versions of the drivers for Windows 10. - -After installation of the Windows 7 or later drivers, the DC2110 will be listed as a new hardware device in the Device Manager. - -Driver Status - -Current Status: - -The current driver for Windows 7 or later systems is Vc2021. - -The current driver 4fefd39f24
            -
            -
            -

            diff --git a/spaces/sadjava/emotion-classification/app.py b/spaces/sadjava/emotion-classification/app.py deleted file mode 100644 index 264c554e528602f8ee2fdecab8f41dc0fd366a23..0000000000000000000000000000000000000000 --- a/spaces/sadjava/emotion-classification/app.py +++ /dev/null @@ -1,43 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb. - -# %% auto 0 -__all__ = ['device', 'model', 'CLASS_LABELS', 'image', 'label', 'examples', 'intf', 'classify_emotions'] - -# %% ../app.ipynb 2 -import gradio as gr -import torch -from torch.nn.functional import softmax -import numpy as np -from PIL import Image - -# %% ../app.ipynb 3 -device = "cuda" if torch.cuda.is_available() else "cpu" - -model = torch.load('model.pth', map_location=torch.device('cpu')).to(device) -model.eval() - -# %% ../app.ipynb 4 -CLASS_LABELS = ['Anger', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sadness', "Surprise"] - -# %% ../app.ipynb 5 -def classify_emotions(im): - im = np.array(im) - im = np.array(Image.fromarray(im).convert('L')) / 255 - im = im[..., np.newaxis] - im = np.concatenate((im, im, im), 2) - im = torch.tensor(im.transpose(2, 0, 1), dtype=torch.float32) - prediction = model.forward(im[np.newaxis, ...].to(device)) - return dict(zip(CLASS_LABELS, *softmax(prediction, dim=1).tolist())) - - -# %% ../app.ipynb 6 -image = gr.inputs.Image((48, 48)) -label = gr.outputs.Label() -examples = ['happy.png', 'fear.png', 'anger.png'] - -intf = gr.Interface(fn=classify_emotions, - inputs=image, - outputs=label, - title='Emotion classification', - examples=examples) -intf.launch(inline=False) diff --git a/spaces/samueldomdey/SentimentAnalysisSingle/README.md b/spaces/samueldomdey/SentimentAnalysisSingle/README.md deleted file mode 100644 index 92cc41c7858a36b98173a2a1220ec8761020bc0d..0000000000000000000000000000000000000000 --- a/spaces/samueldomdey/SentimentAnalysisSingle/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: SentimentAnalysisSingle -emoji: 🚀 -colorFrom: blue -colorTo: pink -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/sanjayw/starcoder-playground/settings.py b/spaces/sanjayw/starcoder-playground/settings.py deleted file mode 100644 index c9e5bb50b5eafeaf325ad36575418c90566acc72..0000000000000000000000000000000000000000 --- a/spaces/sanjayw/starcoder-playground/settings.py +++ /dev/null @@ -1,16 +0,0 @@ -# URLs for the StarCoder Models/APIs -DEFAULT_HUGGINGFACE_MODELS_API_BASE_URL = "https://api-inference.huggingface.co/models/" -DEFAULT_STARCODER_API_PATH = "bigcode/starcoder/" -DEFAULT_STARCODER_BASE_API_PATH = "bigcode/starcoderbase/" -FIM_INDICATOR = "" -DEFAULT_PORT = 7860 - -STATIC_PATH = "static" - -DEFAULT_SETTINGS = dict( - temperature = 0.9, - max_new_tokens = 256, - top_p = 0.95, - repetition_penalty = 1.0, - version = "StarCoder", -) diff --git a/spaces/sarat2hf/table_in_image_to_csv_app/README.md b/spaces/sarat2hf/table_in_image_to_csv_app/README.md deleted file mode 100644 index 71c49c479af1063f3d762bc9be3646cca2d7471b..0000000000000000000000000000000000000000 --- a/spaces/sarat2hf/table_in_image_to_csv_app/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Table In Image To Csv App -emoji: 🐢 -colorFrom: purple -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/ResidualBlock.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/ResidualBlock.py deleted file mode 100644 index f80d15901c0c7d4475a5f038e0aa2883aa4f2a48..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/ResidualBlock.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -References: - - https://github.com/jik876/hifi-gan - - https://github.com/kan-bayashi/ParallelWaveGAN -""" - -import torch - - -class Conv1d(torch.nn.Conv1d): - """ - Conv1d module with customized initialization. - """ - - def __init__(self, *args, **kwargs): - super(Conv1d, self).__init__(*args, **kwargs) - - def reset_parameters(self): - torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu") - if self.bias is not None: - torch.nn.init.constant_(self.bias, 0.0) - - -class Conv1d1x1(Conv1d): - """ - 1x1 Conv1d with customized initialization. - """ - - def __init__(self, in_channels, out_channels, bias): - super(Conv1d1x1, self).__init__(in_channels, out_channels, kernel_size=1, padding=0, dilation=1, bias=bias) - - -class HiFiGANResidualBlock(torch.nn.Module): - """Residual block module in HiFiGAN.""" - - def __init__(self, - kernel_size=3, - channels=512, - dilations=(1, 3, 5), - bias=True, - use_additional_convs=True, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.1}, ): - """ - Initialize HiFiGANResidualBlock module. - - Args: - kernel_size (int): Kernel size of dilation convolution layer. - channels (int): Number of channels for convolution layer. - dilations (List[int]): List of dilation factors. - use_additional_convs (bool): Whether to use additional convolution layers. - bias (bool): Whether to add bias parameter in convolution layers. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - """ - super().__init__() - self.use_additional_convs = use_additional_convs - self.convs1 = torch.nn.ModuleList() - if use_additional_convs: - self.convs2 = torch.nn.ModuleList() - assert kernel_size % 2 == 1, "Kernel size must be odd number." - for dilation in dilations: - self.convs1 += [torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - torch.nn.Conv1d(channels, - channels, - kernel_size, - 1, - dilation=dilation, - bias=bias, - padding=(kernel_size - 1) // 2 * dilation, ), )] - if use_additional_convs: - self.convs2 += [torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - torch.nn.Conv1d(channels, - channels, - kernel_size, - 1, - dilation=1, - bias=bias, - padding=(kernel_size - 1) // 2, ), )] - - def forward(self, x): - """ - Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, channels, T). - - Returns: - Tensor: Output tensor (B, channels, T). - """ - for idx in range(len(self.convs1)): - xt = self.convs1[idx](x) - if self.use_additional_convs: - xt = self.convs2[idx](xt) - x = xt + x - return x diff --git a/spaces/scedlatioru/img-to-music/example/Force 2 Movie Torrent.md b/spaces/scedlatioru/img-to-music/example/Force 2 Movie Torrent.md deleted file mode 100644 index a8f7c1a2df965fd005a6511554edcb899210b242..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Force 2 Movie Torrent.md +++ /dev/null @@ -1,12 +0,0 @@ -

            Force 2 Movie Torrent


            Download File ››››› https://gohhs.com/2uEAH4



            -
            -Find Dark Crystal (CE) (2 DVD) [Italian Edition] on Amazon.com Movies & TV, the home of thousands of titles on DVD and Blu-ray. TV. Films. ... " -(CE) Dark Crystal" (Russian) -The Dark Crystal is the first volume in the Dark Souls series, created by Japanese screenwriter Shigeru Mizuki and published by Comme il faut in 2009. -Tom talks about the squad led by Kaguya. -The book is written in a visual novel style that uses the QSP system. -In addition to the main character, Kaguya, the team also includes Inugami, Yui, Nagi, and Sakisaka. -In 2011, the second book in the series was released - "(CE) Dark Crystal 2: Darkness Rises". 8a78ff9644
            -
            -
            -

            diff --git a/spaces/scedlatioru/img-to-music/example/HD Online Player (xmlbar Video Download _VERIFIED_er Vip Crack).md b/spaces/scedlatioru/img-to-music/example/HD Online Player (xmlbar Video Download _VERIFIED_er Vip Crack).md deleted file mode 100644 index d89931ae7fffa2d983470849cf3e3ff27b3d3fec..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/HD Online Player (xmlbar Video Download _VERIFIED_er Vip Crack).md +++ /dev/null @@ -1,56 +0,0 @@ - -

            HD Online Player (xmlbar video downloader vip crack)

            - -

            Do you want to watch HD videos online without any hassle? Do you want to download HD videos from various websites and play them offline? If yes, then you need an HD online player that can do both. And one of the best HD online players that you can use is xmlbar video downloader vip crack.

            -

            HD Online Player (xmlbar video downloader vip crack)


            Download Ziphttps://gohhs.com/2uEySP



            - -

            Xmlbar video downloader vip crack is a software that allows you to download and play HD videos from various websites, such as YouTube, Vimeo, Dailymotion, Facebook, and more. You can download videos in different formats, such as mp4, 3gp, flv, avi, wmv, etc. You can also choose the quality of the video, such as 1080p, 720p, 480p, etc.

            - -

            But what makes xmlbar video downloader vip crack stand out from other HD online players is that it can also crack the encryption of some websites that prevent you from downloading their videos. For example, some websites use DRM (digital rights management) to protect their videos from being downloaded or copied. Xmlbar video downloader vip crack can bypass this protection and download the videos for you.

            - -

            How to Use HD Online Player (xmlbar video downloader vip crack)

            - -

            Using HD online player (xmlbar video downloader vip crack) is very easy and simple. You just need to follow these steps:

            -

            - -
              -
            1. Download and install xmlbar video downloader vip crack from its official website or from a trusted source.
            2. -
            3. Launch the software and you will see a simple interface with a few buttons and options.
            4. -
            5. Copy the URL of the video that you want to download and paste it in the software.
            6. -
            7. Select the format and quality of the video that you want to download.
            8. -
            9. Click on the download button and wait for the software to download the video for you.
            10. -
            11. Once the download is complete, you can find the video in the output folder that you have chosen.
            12. -
            13. You can also play the video with the built-in player of the software or with any other media player that you have.
            14. -
            - -

            That's it! You have successfully used HD online player (xmlbar video downloader vip crack) to download and play HD videos online.

            - -

            Why Choose HD Online Player (xmlbar video downloader vip crack)

            - -

            There are many reasons why you should choose HD online player (xmlbar video downloader vip crack) over other HD online players. Here are some of them:

            - -
              -
            • It is fast and reliable. It can download HD videos from various websites in a short time and without any errors.
            • -
            • It is easy and simple. It has a user-friendly interface that anyone can use without any difficulty.
            • -
            • It is versatile and powerful. It can download videos in different formats and qualities. It can also crack the encryption of some websites that block downloading.
            • -
            • It is free and safe. It does not cost anything to use it and it does not contain any viruses, malware, or ads that can harm your device or compromise your privacy.
            • -
            - -

            These are some of the benefits of using HD online player (xmlbar video downloader vip crack). You will not regret choosing this software for your HD online video needs.

            - -

            Conclusion

            - -

            HD online player (xmlbar video downloader vip crack) is a software that allows you to download and play HD videos from various websites online. It can also crack the encryption of some websites that prevent downloading. It is fast, reliable, easy, versatile, powerful, free, and safe. It is one of the best HD online players that you can use.

            - -

            If you are looking for an HD online player (xmlbar video downloader vip crack), you should try xmlbar video downloader vip crack. You can download it from its official website or from a trusted source. You can use it to download and play HD videos from various websites in different formats and qualities. You will enjoy watching HD videos online with this software.

            - -

            So what are you waiting for? Download xmlbar video downloader vip crack today and enjoy watching HD videos online!

            -

            Conclusion

            - -

            HD online player (xmlbar video downloader vip crack) is a software that allows you to download and play HD videos from various websites online. It can also crack the encryption of some websites that prevent downloading. It is fast, reliable, easy, versatile, powerful, free, and safe. It is one of the best HD online players that you can use.

            - -

            If you are looking for an HD online player (xmlbar video downloader vip crack), you should try xmlbar video downloader vip crack. You can download it from its official website or from a trusted source. You can use it to download and play HD videos from various websites in different formats and qualities. You will enjoy watching HD videos online with this software.

            - -

            So what are you waiting for? Download xmlbar video downloader vip crack today and enjoy watching HD videos online!

            3cee63e6c2
            -
            -
\ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_vc_tacotron2.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_vc_tacotron2.py deleted file mode 100644 index e876c42cf7c6c07a58406dcff7a31fbd3649c10f..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/e2e_vc_tacotron2.py +++ /dev/null @@ -1,782 +0,0 @@ -# Copyright 2020 Nagoya University (Wen-Chin Huang) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Tacotron2-VC related modules.""" - -import logging - -from distutils.util import strtobool - -import numpy as np -import torch -import torch.nn.functional as F - -from espnet.nets.pytorch_backend.rnn.attentions import AttForward -from espnet.nets.pytorch_backend.rnn.attentions import AttForwardTA -from espnet.nets.pytorch_backend.rnn.attentions import AttLoc -from espnet.nets.pytorch_backend.tacotron2.cbhg import CBHG -from espnet.nets.pytorch_backend.tacotron2.cbhg import CBHGLoss -from espnet.nets.pytorch_backend.tacotron2.decoder import Decoder -from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder -from espnet.nets.tts_interface import TTSInterface -from espnet.utils.fill_missing_args import fill_missing_args -from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import ( - GuidedAttentionLoss, # noqa: H301 - Tacotron2Loss, # noqa: H301 -) - - -class Tacotron2(TTSInterface, torch.nn.Module): - """Tacotron2 module for VC. - - This is a module of the Tacotron2-based VC model, - which converts the sequence of source acoustic features - into the sequence of target acoustic features. - """ - - @staticmethod - def add_arguments(parser): - """Add model-specific arguments to the parser.""" - group = parser.add_argument_group("tacotron 2 model setting") - # encoder - group.add_argument( - "--elayers", default=1, type=int, help="Number of encoder layers" - ) - group.add_argument( - "--eunits", - "-u", - default=512, - type=int, - help="Number of encoder hidden units", - ) - group.add_argument( - "--econv-layers", - default=3, - type=int, - help="Number of encoder convolution layers", - ) - group.add_argument( - "--econv-chans", - default=512, - type=int, - help="Number of encoder convolution channels", - ) - group.add_argument( - "--econv-filts", - default=5, - type=int, - help="Filter size of encoder convolution", - ) - # attention - group.add_argument( - "--atype", - default="location", - type=str, - choices=["forward_ta", "forward", "location"], - help="Type of attention mechanism", - ) - group.add_argument( - "--adim", - default=512, - type=int, - help="Number of attention transformation dimensions", - ) - group.add_argument( - "--aconv-chans", - default=32, - type=int, - help="Number of attention convolution channels", - ) - group.add_argument( - "--aconv-filts", - default=15, - type=int, - help="Filter size of attention convolution", - ) - group.add_argument( - "--cumulate-att-w", - default=True, - type=strtobool, - help="Whether or not to cumulate attention weights", - ) - # decoder - group.add_argument( - "--dlayers", default=2, type=int, help="Number of decoder layers" - ) - group.add_argument( - "--dunits", default=1024, type=int, help="Number of decoder hidden units" - ) - group.add_argument( - "--prenet-layers", default=2, type=int, help="Number of prenet layers" - ) - group.add_argument( - "--prenet-units", - default=256, - type=int, - help="Number of prenet hidden units", - ) - group.add_argument( - "--postnet-layers", default=5, type=int,
help="Number of postnet layers" - ) - group.add_argument( - "--postnet-chans", default=512, type=int, help="Number of postnet channels" - ) - group.add_argument( - "--postnet-filts", default=5, type=int, help="Filter size of postnet" - ) - group.add_argument( - "--output-activation", - default=None, - type=str, - nargs="?", - help="Output activation function", - ) - # cbhg - group.add_argument( - "--use-cbhg", - default=False, - type=strtobool, - help="Whether to use CBHG module", - ) - group.add_argument( - "--cbhg-conv-bank-layers", - default=8, - type=int, - help="Number of convolutional bank layers in CBHG", - ) - group.add_argument( - "--cbhg-conv-bank-chans", - default=128, - type=int, - help="Number of convolutional bank channels in CBHG", - ) - group.add_argument( - "--cbhg-conv-proj-filts", - default=3, - type=int, - help="Filter size of convolutional projection layer in CBHG", - ) - group.add_argument( - "--cbhg-conv-proj-chans", - default=256, - type=int, - help="Number of convolutional projection channels in CBHG", - ) - group.add_argument( - "--cbhg-highway-layers", - default=4, - type=int, - help="Number of highway layers in CBHG", - ) - group.add_argument( - "--cbhg-highway-units", - default=128, - type=int, - help="Number of highway units in CBHG", - ) - group.add_argument( - "--cbhg-gru-units", - default=256, - type=int, - help="Number of GRU units in CBHG", - ) - # model (parameter) related - group.add_argument( - "--use-batch-norm", - default=True, - type=strtobool, - help="Whether to use batch normalization", - ) - group.add_argument( - "--use-concate", - default=True, - type=strtobool, - help="Whether to concatenate encoder embedding with decoder outputs", - ) - group.add_argument( - "--use-residual", - default=True, - type=strtobool, - help="Whether to use residual connection in conv layer", - ) - group.add_argument( - "--dropout-rate", default=0.5, type=float, help="Dropout rate" - ) - group.add_argument( - "--zoneout-rate", default=0.1, type=float, help="Zoneout rate" - ) - group.add_argument( - "--reduction-factor", - default=1, - type=int, - help="Reduction factor (for decoder)", - ) - group.add_argument( - "--encoder-reduction-factor", - default=1, - type=int, - help="Reduction factor (for encoder)", - ) - group.add_argument( - "--spk-embed-dim", - default=None, - type=int, - help="Number of speaker embedding dimensions", - ) - group.add_argument( - "--spc-dim", default=None, type=int, help="Number of spectrogram dimensions" - ) - group.add_argument( - "--pretrained-model", default=None, type=str, help="Pretrained model path" - ) - # loss related - group.add_argument( - "--use-masking", - default=False, - type=strtobool, - help="Whether to use masking in calculation of loss", - ) - group.add_argument( - "--bce-pos-weight", - default=20.0, - type=float, - help="Positive sample weight in BCE calculation " - "(only for use-masking=True)", - ) - group.add_argument( - "--use-guided-attn-loss", - default=False, - type=strtobool, - help="Whether to use guided attention loss", - ) - group.add_argument( - "--guided-attn-loss-sigma", - default=0.4, - type=float, - help="Sigma in guided attention loss", - ) - group.add_argument( - "--guided-attn-loss-lambda", - default=1.0, - type=float, - help="Lambda in guided attention loss", - ) - group.add_argument( - "--src-reconstruction-loss-lambda", - default=1.0, - type=float, - help="Lambda in source reconstruction loss", - ) - group.add_argument( - "--trg-reconstruction-loss-lambda", - default=1.0, - type=float, - help="Lambda in
target reconstruction loss", - ) - return parser - - def __init__(self, idim, odim, args=None): - """Initialize Tacotron2 module. - - Args: - idim (int): Dimension of the inputs. - odim (int): Dimension of the outputs. - args (Namespace, optional): - - spk_embed_dim (int): Dimension of the speaker embedding. - - elayers (int): The number of encoder blstm layers. - - eunits (int): The number of encoder blstm units. - - econv_layers (int): The number of encoder conv layers. - - econv_filts (int): The filter size of encoder conv layers. - - econv_chans (int): The number of encoder conv filter channels. - - dlayers (int): The number of decoder lstm layers. - - dunits (int): The number of decoder lstm units. - - prenet_layers (int): The number of prenet layers. - - prenet_units (int): The number of prenet units. - - postnet_layers (int): The number of postnet layers. - - postnet_filts (int): The filter size of postnet layers. - - postnet_chans (int): The number of postnet filter channels. - - output_activation (str): The name of activation function for outputs. - - adim (int): The number of dimensions of the mlp in attention. - - aconv_chans (int): The number of attention conv filter channels. - - aconv_filts (int): The filter size of attention conv layers. - - cumulate_att_w (bool): Whether to cumulate previous attention weight. - - use_batch_norm (bool): Whether to use batch normalization. - - use_concate (bool): - Whether to concatenate encoder embedding with decoder lstm outputs. - - dropout_rate (float): Dropout rate. - - zoneout_rate (float): Zoneout rate. - - reduction_factor (int): Reduction factor. - - spk_embed_dim (int): Number of speaker embedding dimensions. - - spc_dim (int): Number of spectrogram embedding dimensions - (only for use_cbhg=True). - - use_cbhg (bool): Whether to use CBHG module. - - cbhg_conv_bank_layers (int): - The number of convolutional banks in CBHG. - - cbhg_conv_bank_chans (int): - The number of channels of convolutional bank in CBHG. - - cbhg_proj_filts (int): - The filter size of the projection layer in CBHG. - - cbhg_proj_chans (int): - The number of channels of projection layer in CBHG. - - cbhg_highway_layers (int): - The number of layers of highway network in CBHG. - - cbhg_highway_units (int): - The number of units of highway network in CBHG. - - cbhg_gru_units (int): The number of units of GRU in CBHG. - - use_masking (bool): Whether to mask padded part in loss calculation. - - bce_pos_weight (float): Weight of positive sample of stop token - (only for use_masking=True). - - use-guided-attn-loss (bool): Whether to use guided attention loss. - - guided-attn-loss-sigma (float): Sigma in guided attention loss. - - guided-attn-loss-lambda (float): Lambda in guided attention loss.
- - """ - # initialize base classes - TTSInterface.__init__(self) - torch.nn.Module.__init__(self) - - # fill missing arguments - args = fill_missing_args(args, self.add_arguments) - - # store hyperparameters - self.idim = idim - self.odim = odim - self.adim = args.adim - self.spk_embed_dim = args.spk_embed_dim - self.cumulate_att_w = args.cumulate_att_w - self.reduction_factor = args.reduction_factor - self.encoder_reduction_factor = args.encoder_reduction_factor - self.use_cbhg = args.use_cbhg - self.use_guided_attn_loss = args.use_guided_attn_loss - self.src_reconstruction_loss_lambda = args.src_reconstruction_loss_lambda - self.trg_reconstruction_loss_lambda = args.trg_reconstruction_loss_lambda - - # define activation function for the final output - if args.output_activation is None: - self.output_activation_fn = None - elif hasattr(F, args.output_activation): - self.output_activation_fn = getattr(F, args.output_activation) - else: - raise ValueError( - "there is no such an activation function. (%s)" % args.output_activation - ) - - # define network modules - self.enc = Encoder( - idim=idim * args.encoder_reduction_factor, - input_layer="linear", - elayers=args.elayers, - eunits=args.eunits, - econv_layers=args.econv_layers, - econv_chans=args.econv_chans, - econv_filts=args.econv_filts, - use_batch_norm=args.use_batch_norm, - use_residual=args.use_residual, - dropout_rate=args.dropout_rate, - ) - dec_idim = ( - args.eunits - if args.spk_embed_dim is None - else args.eunits + args.spk_embed_dim - ) - if args.atype == "location": - att = AttLoc( - dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts - ) - elif args.atype == "forward": - att = AttForward( - dec_idim, args.dunits, args.adim, args.aconv_chans, args.aconv_filts - ) - if self.cumulate_att_w: - logging.warning( - "cumulation of attention weights is disabled in forward attention." - ) - self.cumulate_att_w = False - elif args.atype == "forward_ta": - att = AttForwardTA( - dec_idim, - args.dunits, - args.adim, - args.aconv_chans, - args.aconv_filts, - odim, - ) - if self.cumulate_att_w: - logging.warning( - "cumulation of attention weights is disabled in forward attention." 
- ) - self.cumulate_att_w = False - else: - raise NotImplementedError("Support only location or forward") - self.dec = Decoder( - idim=dec_idim, - odim=odim, - att=att, - dlayers=args.dlayers, - dunits=args.dunits, - prenet_layers=args.prenet_layers, - prenet_units=args.prenet_units, - postnet_layers=args.postnet_layers, - postnet_chans=args.postnet_chans, - postnet_filts=args.postnet_filts, - output_activation_fn=self.output_activation_fn, - cumulate_att_w=self.cumulate_att_w, - use_batch_norm=args.use_batch_norm, - use_concate=args.use_concate, - dropout_rate=args.dropout_rate, - zoneout_rate=args.zoneout_rate, - reduction_factor=args.reduction_factor, - ) - self.taco2_loss = Tacotron2Loss( - use_masking=args.use_masking, bce_pos_weight=args.bce_pos_weight - ) - if self.use_guided_attn_loss: - self.attn_loss = GuidedAttentionLoss( - sigma=args.guided_attn_loss_sigma, - alpha=args.guided_attn_loss_lambda, - ) - if self.use_cbhg: - self.cbhg = CBHG( - idim=odim, - odim=args.spc_dim, - conv_bank_layers=args.cbhg_conv_bank_layers, - conv_bank_chans=args.cbhg_conv_bank_chans, - conv_proj_filts=args.cbhg_conv_proj_filts, - conv_proj_chans=args.cbhg_conv_proj_chans, - highway_layers=args.cbhg_highway_layers, - highway_units=args.cbhg_highway_units, - gru_units=args.cbhg_gru_units, - ) - self.cbhg_loss = CBHGLoss(use_masking=args.use_masking) - if self.src_reconstruction_loss_lambda > 0: - self.src_reconstructor = Encoder( - idim=dec_idim, - input_layer="linear", - elayers=args.elayers, - eunits=args.eunits, - econv_layers=args.econv_layers, - econv_chans=args.econv_chans, - econv_filts=args.econv_filts, - use_batch_norm=args.use_batch_norm, - use_residual=args.use_residual, - dropout_rate=args.dropout_rate, - ) - self.src_reconstructor_linear = torch.nn.Linear( - args.econv_chans, idim * args.encoder_reduction_factor - ) - - self.src_reconstruction_loss = CBHGLoss(use_masking=args.use_masking) - if self.trg_reconstruction_loss_lambda > 0: - self.trg_reconstructor = Encoder( - idim=dec_idim, - input_layer="linear", - elayers=args.elayers, - eunits=args.eunits, - econv_layers=args.econv_layers, - econv_chans=args.econv_chans, - econv_filts=args.econv_filts, - use_batch_norm=args.use_batch_norm, - use_residual=args.use_residual, - dropout_rate=args.dropout_rate, - ) - self.trg_reconstructor_linear = torch.nn.Linear( - args.econv_chans, odim * args.reduction_factor - ) - self.trg_reconstruction_loss = CBHGLoss(use_masking=args.use_masking) - - # load pretrained model - if args.pretrained_model is not None: - self.load_pretrained_model(args.pretrained_model) - - def forward( - self, xs, ilens, ys, labels, olens, spembs=None, spcs=None, *args, **kwargs - ): - """Calculate forward propagation. - - Args: - xs (Tensor): Batch of padded acoustic features (B, Tmax, idim). - ilens (LongTensor): Batch of lengths of each input batch (B,). - ys (Tensor): Batch of padded target features (B, Lmax, odim). - olens (LongTensor): Batch of the lengths of each target (B,). - spembs (Tensor, optional): - Batch of speaker embedding vectors (B, spk_embed_dim). - spcs (Tensor, optional): - Batch of groundtruth spectrograms (B, Lmax, spc_dim). - - Returns: - Tensor: Loss value. 
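Note:
    The returned loss sums the base Tacotron 2 terms (L1 + MSE + BCE)
    with the guided-attention, source/target reconstruction, and CBHG
    losses, for whichever of those are enabled in the configuration.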
- - """ - # remove unnecessary padded part (for multi-gpus) - max_in = max(ilens) - max_out = max(olens) - if max_in != xs.shape[1]: - xs = xs[:, :max_in] - if max_out != ys.shape[1]: - ys = ys[:, :max_out] - labels = labels[:, :max_out] - - # thin out input frames for reduction factor - # (B, Lmax, idim) -> (B, Lmax // r, idim * r) - if self.encoder_reduction_factor > 1: - B, Lmax, idim = xs.shape - if Lmax % self.encoder_reduction_factor != 0: - xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :] - xs_ds = xs.contiguous().view( - B, - int(Lmax / self.encoder_reduction_factor), - idim * self.encoder_reduction_factor, - ) - ilens_ds = ilens.new( - [ilen // self.encoder_reduction_factor for ilen in ilens] - ) - else: - xs_ds, ilens_ds = xs, ilens - - # calculate tacotron2 outputs - hs, hlens = self.enc(xs_ds, ilens_ds) - if self.spk_embed_dim is not None: - spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1) - hs = torch.cat([hs, spembs], dim=-1) - after_outs, before_outs, logits, att_ws = self.dec(hs, hlens, ys) - - # calculate src reconstruction - if self.src_reconstruction_loss_lambda > 0: - B, _in_length, _adim = hs.shape - xt, xtlens = self.src_reconstructor(hs, hlens) - xt = self.src_reconstructor_linear(xt) - if self.encoder_reduction_factor > 1: - xt = xt.view(B, -1, self.idim) - - # calculate trg reconstruction - if self.trg_reconstruction_loss_lambda > 0: - olens_trg_cp = olens.new( - sorted([olen // self.reduction_factor for olen in olens], reverse=True) - ) - B, _in_length, _adim = hs.shape - _, _out_length, _ = att_ws.shape - # att_R should be [B, out_length / r_d, adim] - att_R = torch.sum( - hs.view(B, 1, _in_length, _adim) - * att_ws.view(B, _out_length, _in_length, 1), - dim=2, - ) - yt, ytlens = self.trg_reconstructor( - att_R, olens_trg_cp - ) # is using olens correct?
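# att_R computed above is the attention-weighted sum of encoder states, i.e.
# the context vector consumed at each decoder step. Re-encoding it with
# trg_reconstructor and projecting it back to the target feature space below
# lets the reconstruction loss push the attention toward preserving the
# linguistic content of the utterance.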
- yt = self.trg_reconstructor_linear(yt) - if self.reduction_factor > 1: - yt = yt.view( - B, -1, self.odim - ) # now att_R should be [B, out_length, adim] - - # modify mod part of groundtruth - if self.reduction_factor > 1: - olens = olens.new([olen - olen % self.reduction_factor for olen in olens]) - max_out = max(olens) - ys = ys[:, :max_out] - labels = labels[:, :max_out] - labels[:, -1] = 1.0 # make sure at least one frame has 1 - if self.encoder_reduction_factor > 1: - ilens = ilens.new( - [ilen - ilen % self.encoder_reduction_factor for ilen in ilens] - ) - max_in = max(ilens) - xs = xs[:, :max_in] - - # calculate taco2 loss - l1_loss, mse_loss, bce_loss = self.taco2_loss( - after_outs, before_outs, logits, ys, labels, olens - ) - loss = l1_loss + mse_loss + bce_loss - report_keys = [ - {"l1_loss": l1_loss.item()}, - {"mse_loss": mse_loss.item()}, - {"bce_loss": bce_loss.item()}, - ] - - # calculate context-preservation loss - if self.src_reconstruction_loss_lambda > 0: - src_recon_l1_loss, src_recon_mse_loss = self.src_reconstruction_loss( - xt, xs, ilens - ) - loss = loss + src_recon_l1_loss - report_keys += [ - {"src_recon_l1_loss": src_recon_l1_loss.item()}, - {"src_recon_mse_loss": src_recon_mse_loss.item()}, - ] - if self.trg_reconstruction_loss_lambda > 0: - trg_recon_l1_loss, trg_recon_mse_loss = self.trg_reconstruction_loss( - yt, ys, olens - ) - loss = loss + trg_recon_l1_loss - report_keys += [ - {"trg_recon_l1_loss": trg_recon_l1_loss.item()}, - {"trg_recon_mse_loss": trg_recon_mse_loss.item()}, - ] - - # calculate attention loss - if self.use_guided_attn_loss: - # NOTE(kan-bayashi): length of output for auto-regressive input - # will be changed when r > 1 - if self.encoder_reduction_factor > 1: - ilens_in = ilens.new( - [ilen // self.encoder_reduction_factor for ilen in ilens] - ) - else: - ilens_in = ilens - if self.reduction_factor > 1: - olens_in = olens.new([olen // self.reduction_factor for olen in olens]) - else: - olens_in = olens - attn_loss = self.attn_loss(att_ws, ilens_in, olens_in) - loss = loss + attn_loss - report_keys += [ - {"attn_loss": attn_loss.item()}, - ] - - # calculate cbhg loss - if self.use_cbhg: - # remove unnecessary padded part (for multi-gpus) - if max_out != spcs.shape[1]: - spcs = spcs[:, :max_out] - - # calculate cbhg outputs & loss and report them - cbhg_outs, _ = self.cbhg(after_outs, olens) - cbhg_l1_loss, cbhg_mse_loss = self.cbhg_loss(cbhg_outs, spcs, olens) - loss = loss + cbhg_l1_loss + cbhg_mse_loss - report_keys += [ - {"cbhg_l1_loss": cbhg_l1_loss.item()}, - {"cbhg_mse_loss": cbhg_mse_loss.item()}, - ] - - report_keys += [{"loss": loss.item()}] - self.reporter.report(report_keys) - - return loss - - def inference(self, x, inference_args, spemb=None, *args, **kwargs): - """Generate the sequence of target features given the sequence of source acoustic features. - - Args: - x (Tensor): Input sequence of acoustic features (T, idim). - inference_args (Namespace): - - threshold (float): Threshold in inference. - minlenratio (float): Minimum length ratio in inference. - maxlenratio (float): Maximum length ratio in inference. - spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim). - - Returns: - Tensor: Output sequence of features (L, odim). - Tensor: Output sequence of stop probabilities (L,). - Tensor: Attention weights (L, T).
- - """ - # get options - threshold = inference_args.threshold - minlenratio = inference_args.minlenratio - maxlenratio = inference_args.maxlenratio - - # thin out input frames for reduction factor - # (B, Lmax, idim) -> (B, Lmax // r, idim * r) - if self.encoder_reduction_factor > 1: - Lmax, idim = x.shape - if Lmax % self.encoder_reduction_factor != 0: - x = x[: -(Lmax % self.encoder_reduction_factor), :] - x_ds = x.contiguous().view( - int(Lmax / self.encoder_reduction_factor), - idim * self.encoder_reduction_factor, - ) - else: - x_ds = x - - # inference - h = self.enc.inference(x_ds) - if self.spk_embed_dim is not None: - spemb = F.normalize(spemb, dim=0).unsqueeze(0).expand(h.size(0), -1) - h = torch.cat([h, spemb], dim=-1) - outs, probs, att_ws = self.dec.inference(h, threshold, minlenratio, maxlenratio) - - if self.use_cbhg: - cbhg_outs = self.cbhg.inference(outs) - return cbhg_outs, probs, att_ws - else: - return outs, probs, att_ws - - def calculate_all_attentions(self, xs, ilens, ys, spembs=None, *args, **kwargs): - """Calculate all of the attention weights. - - Args: - xs (Tensor): Batch of padded acoustic features (B, Tmax, idim). - ilens (LongTensor): Batch of lengths of each input batch (B,). - ys (Tensor): Batch of padded target features (B, Lmax, odim). - olens (LongTensor): Batch of the lengths of each target (B,). - spembs (Tensor, optional): - Batch of speaker embedding vectors (B, spk_embed_dim). - - Returns: - numpy.ndarray: Batch of attention weights (B, Lmax, Tmax). - - """ - # check ilens type (should be list of int) - if isinstance(ilens, torch.Tensor) or isinstance(ilens, np.ndarray): - ilens = list(map(int, ilens)) - - self.eval() - with torch.no_grad(): - # thin out input frames for reduction factor - # (B, Lmax, idim) -> (B, Lmax // r, idim * r) - if self.encoder_reduction_factor > 1: - B, Lmax, idim = xs.shape - if Lmax % self.encoder_reduction_factor != 0: - xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :] - xs_ds = xs.contiguous().view( - B, - int(Lmax / self.encoder_reduction_factor), - idim * self.encoder_reduction_factor, - ) - ilens_ds = [ilen // self.encoder_reduction_factor for ilen in ilens] - else: - xs_ds, ilens_ds = xs, ilens - - hs, hlens = self.enc(xs_ds, ilens_ds) - if self.spk_embed_dim is not None: - spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1) - hs = torch.cat([hs, spembs], dim=-1) - att_ws = self.dec.calculate_all_attentions(hs, hlens, ys) - self.train() - - return att_ws.cpu().numpy() - - @property - def base_plot_keys(self): - """Return base key names to plot during training. - - keys should match what `chainer.reporter` reports. - If you add the key `loss`, the reporter will report `main/loss` - and `validation/main/loss` values. - also `loss.png` will be created as a figure visulizing `main/loss` - and `validation/main/loss` values. - - Returns: - list: List of strings which are base keys to plot during training. 
- - """ - plot_keys = ["loss", "l1_loss", "mse_loss", "bce_loss"] - if self.use_guided_attn_loss: - plot_keys += ["attn_loss"] - if self.use_cbhg: - plot_keys += ["cbhg_l1_loss", "cbhg_mse_loss"] - if self.src_reconstruction_loss_lambda > 0: - plot_keys += ["src_recon_l1_loss", "src_recon_mse_loss"] - if self.trg_reconstruction_loss_lambda > 0: - plot_keys += ["trg_recon_l1_loss", "trg_recon_mse_loss"] - return plot_keys - - def _sort_by_length(self, xs, ilens): - sort_ilens, sort_idx = ilens.sort(0, descending=True) - return xs[sort_idx], ilens[sort_idx], sort_idx - - def _revert_sort_by_length(self, xs, ilens, sort_idx): - _, revert_idx = sort_idx.sort(0) - return xs[revert_idx], ilens[revert_idx] diff --git a/spaces/sentencebird/translation-word-order/app.py b/spaces/sentencebird/translation-word-order/app.py deleted file mode 100644 index 7b6a84e1513a9b8aac3679b2b36457f69348b37e..0000000000000000000000000000000000000000 --- a/spaces/sentencebird/translation-word-order/app.py +++ /dev/null @@ -1,160 +0,0 @@ -import streamlit as st -import streamlit.components.v1 as components -import torch -import pickle - -from transformers import MBart50TokenizerFast, MBartForConditionalGeneration - -from pyvis.network import Network - -#from constants import * - -lang_codes_by_name = \ -{'Arabic': 'ar_AR', - 'Czech': 'cs_CZ', - 'German': 'de_DE', - 'English': 'en_XX', - 'Spanish': 'es_XX', - 'Estonian': 'et_EE', - 'Finnish': 'fi_FI', - 'French': 'fr_XX', - 'Gujarati': 'gu_IN', - 'Hindi': 'hi_IN', - 'Italian': 'it_IT', - 'Japanese': 'ja_XX', - 'Kazakh': 'kk_KZ', - 'Korean': 'ko_KR', - 'Lithuanian': 'lt_LT', - 'Latvian': 'lv_LV', - 'Burmese': 'my_MM', - 'Nepali': 'ne_NP', - 'Dutch': 'nl_XX', - 'Romanian': 'ro_RO', - 'Russian': 'ru_RU', - 'Sinhala': 'si_LK', - 'Turkish': 'tr_TR', - 'Vietnamese': 'vi_VN', - 'Chinese': 'zh_CN', - 'Afrikaans': 'af_ZA', - 'Azerbaijani': 'az_AZ', - 'Bengali': 'bn_IN', - 'Persian': 'fa_IR', - 'Hebrew': 'he_IL', - 'Croatian': 'hr_HR', - 'Indonesian': 'id_ID', - 'Georgian': 'ka_GE', - 'Khmer': 'km_KH', - 'Macedonian': 'mk_MK', - 'Malayalam': 'ml_IN', - 'Mongolian': 'mn_MN', - 'Marathi': 'mr_IN', - 'Polish': 'pl_PL', - 'Pashto': 'ps_AF', - 'Portuguese': 'pt_XX', - 'Swedish': 'sv_SE', - 'Swahili': 'sw_KE', - 'Tamil': 'ta_IN', - 'Telugu': 'te_IN', - 'Thai': 'th_TH', - 'Tagalog': 'tl_XX', - 'Ukrainian': 'uk_UA', - 'Urdu': 'ur_PK', - 'Xhosa': 'xh_ZA', - 'Galician': 'gl_ES', - 'Slovene': 'sl_SI'} - -@st.cache(allow_output_mutation=True) -def load_model(): - return MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") - -@st.cache(allow_output_mutation=True) -def load_tokenizer(): - return MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", use_fast=False) - -class Translation(): - def __init__(self, src_lang, dest_lang): - self.model = load_model() - self.tokenizer = load_tokenizer() - self.tokenizer.src_lang = src_lang - self.dest_lang = dest_lang - - def process(self, src_text): - encoded = self.tokenizer(src_text, return_tensors="pt") - generated_tokens = self.model.generate(**encoded, forced_bos_token_id=self.tokenizer.lang_code_to_id[self.dest_lang]) - generated_texts = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) - self.dest_text = generated_texts[0] - - encoder_input_ids = self.tokenizer(src_text, return_tensors="pt").input_ids - decoder_input_ids = self.tokenizer(self.dest_text, return_tensors="pt").input_ids - - self.outputs = self.model(input_ids=encoder_input_ids, 
decoder_input_ids=decoder_input_ids, output_attentions=True) - - self.encoder_text = self.tokenizer.convert_ids_to_tokens(encoder_input_ids[0]) - self.decoder_text = self.tokenizer.convert_ids_to_tokens(decoder_input_ids[0]) - - mean_cross_attentions = tuple([torch.mean(self.outputs.cross_attentions[0], 1, True)]) - self.positions = [int(i) for i in torch.argmax(mean_cross_attentions[0], dim=2).flatten()] # array describing the word-order rearrangement - -class TranslationNetwork(): - def __init__(self, network): - self.network = network - self.n_nodes = 0 - self.n_src_nodes = 0 - self.n_dest_nodes = 0 - - def add_nodes(self, words, group): - if group == "src": - self.n_src_nodes = len(words) - group_i = 0 - hidden_nodes_i = [0, self.n_src_nodes-1] - elif group == "dest": - self.n_dest_nodes = len(words) - group_i = 1 - hidden_nodes_i = [0, self.n_dest_nodes-1] - self.hidden_edges_i = [0, self.n_src_nodes-1, self.n_src_nodes, self.n_src_nodes+self.n_dest_nodes-1] - - size = 10 - x_margin, y_margin = 100, 100 - for i, word in enumerate(words): - hidden = i in hidden_nodes_i - self.network.add_node(self.n_nodes, shape="square", label=word, group=f"{group}", x=i*x_margin, y=group_i*y_margin, size=size, physics=False, hidden=hidden) - self.n_nodes += 1 - - def add_edges(self, positions): - for i, position in enumerate(positions): - j = self.n_src_nodes + position - hidden = i in self.hidden_edges_i or j in self.hidden_edges_i - self.network.add_edge(i, j, color="gray", hidden=hidden) - -st.set_page_config(layout="wide") -st.title("The Word Order Comparison of Translation") - -src_lang_name = st.selectbox("Source Language", list(lang_codes_by_name.keys()), index=3) -tgt_lang_name = st.selectbox("Target Language", list(lang_codes_by_name.keys()), index=11) - -with st.spinner("Loading the model"): - src_lang, tgt_lang = lang_codes_by_name[src_lang_name], lang_codes_by_name[tgt_lang_name] - translation = Translation(src_lang, tgt_lang) - -src_text = st.text_input("Original Text", "I saw a girl with a telescope in the garden.") - -if st.button("Translate"): - with st.spinner("Translating..."): - translation.process(src_text) - - st.subheader("Translated") - st.write(translation.dest_text) - - tn = TranslationNetwork(Network(width="100%", height="300px")) - tn.add_nodes(translation.encoder_text, group="src") - tn.add_nodes(translation.decoder_text, group="dest") - tn.add_edges(translation.positions) - - fname = f"{src_text[:20]}_{src_lang_name}_{tgt_lang_name}.html" - tn.network.show(fname) - html_file = open(fname, "r", encoding="utf-8") - components.html(html_file.read(), height=500) - - st.subheader("Order") - st.text(translation.positions) - diff --git a/spaces/sgxz/bingo/src/components/ui/badge.tsx b/spaces/sgxz/bingo/src/components/ui/badge.tsx deleted file mode 100644 index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000 --- a/spaces/sgxz/bingo/src/components/ui/badge.tsx +++ /dev/null @@ -1,36 +0,0 @@ -import * as React from 'react' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const badgeVariants = cva( - 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2', - { - variants: { - variant: { - default: - 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80', - secondary: - 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80', - destructive: - 'border-transparent
-        outline: 'text-foreground'
-      }
-    },
-    defaultVariants: {
-      variant: 'default'
-    }
-  }
-)
-
-export interface BadgeProps
-  extends React.HTMLAttributes<HTMLDivElement>,
-    VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
-  return (
-    <div className={cn(badgeVariants({ variant }), className)} {...props} />
-  )
-}
-
-export { Badge, badgeVariants }
diff --git a/spaces/shaocongma/faiss_chat/knowledge/faiss_handler.py b/spaces/shaocongma/faiss_chat/knowledge/faiss_handler.py
deleted file mode 100644
index 44252632bb2be58aead64d97f28b864e982721d8..0000000000000000000000000000000000000000
--- a/spaces/shaocongma/faiss_chat/knowledge/faiss_handler.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import json
-import uuid
-from langchain.vectorstores import FAISS
-import os
-from tqdm.auto import tqdm
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.document_loaders import DirectoryLoader, TextLoader
-from llms.embeddings import EMBEDDINGS_MAPPING
-import tiktoken
-import zipfile
-import pickle
-
-tokenizer_name = tiktoken.encoding_for_model('gpt-4')
-tokenizer = tiktoken.get_encoding(tokenizer_name.name)
-EMBED_MODEL = "text-embedding-ada-002"
-EMBED_DIM = 1536
-METRIC = 'cosine'
-
-#######################################################################################################################
-# Files handler
-#######################################################################################################################
-def check_existence(path):
-    return os.path.isfile(path) or os.path.isdir(path)
-
-
-def list_files(directory, ext=".pdf"):
-    # List all files in the directory
-    files_in_directory = os.listdir(directory)
-    # Filter the list to only include files with the given extension
-    files_list = [file for file in files_in_directory if file.endswith(ext)]
-    return files_list
-
-
-def list_pdf_files(directory):
-    # List all files in the directory
-    files_in_directory = os.listdir(directory)
-    # Filter the list to only include PDF files
-    pdf_files = [file for file in files_in_directory if file.endswith(".pdf")]
-    return pdf_files
-
-
-
-def tiktoken_len(text):
-    # evaluate how many tokens the given text costs
-    tokens = tokenizer.encode(text, disallowed_special=())
-    return len(tokens)
-
-
-def get_chunks(docs, chunk_size=500, chunk_overlap=20, length_function=tiktoken_len):
-    # docs should be the output of `loader.load()`
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
-                                                   chunk_overlap=chunk_overlap,
-                                                   length_function=length_function,
-                                                   separators=["\n\n", "\n", " ", ""])
-    chunks = []
-    for idx, page in enumerate(tqdm(docs)):
-        source = page.metadata.get('source')
-        content = page.page_content
-        if len(content) > 50:
-            texts = text_splitter.split_text(content)
-            chunks.extend([str({'content': texts[i], 'chunk': i, 'source': os.path.basename(source)}) for i in
-                           range(len(texts))])
-    return chunks
-
-
-#######################################################################################################################
-# Create FAISS object
-#######################################################################################################################
-
-# ["text-embedding-ada-002", "distilbert-dot-tas_b-b256-msmarco"]
-
-def create_faiss_index_from_zip(path_to_zip_file, embeddings=None, pdf_loader=None,
-                                chunk_size=500, chunk_overlap=20,
-                                project_name="Very_Cool_Project_Name"):
-    # initialize the file structure
-    # structure: project_name
-    #               - source data
-    #               - embeddings
-    #               - faiss_index
-    if isinstance(embeddings, str):
-        import copy
-        embeddings_str = copy.deepcopy(embeddings)
-    else:
-        embeddings_str = "other-embedding-model"
-
-    if embeddings is None or embeddings == "text-embedding-ada-002":
-        embeddings = EMBEDDINGS_MAPPING["text-embedding-ada-002"]
-    elif isinstance(embeddings, str):
-        embeddings = EMBEDDINGS_MAPPING[embeddings]
-    else:
-        embeddings = EMBEDDINGS_MAPPING["text-embedding-ada-002"]
-    # STEP 1:
-    # Create a folder f"{project_name}" in the current directory.
-    current_directory = os.getcwd()
-    if not os.path.exists(project_name):
-        os.makedirs(project_name)
-        project_path = os.path.join(current_directory, project_name)
-        source_data = os.path.join(project_path, "source_data")
-        embeddings_data = os.path.join(project_path, "embeddings")
-        index_data = os.path.join(project_path, "faiss_index")
-        os.makedirs(source_data)        # ./project/source_data
-        os.makedirs(embeddings_data)    # ./project/embeddings
-        os.makedirs(index_data)         # ./project/faiss_index
-    else:
-        raise ValueError(f"The project {project_name} already exists.")
-    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
-        # extract everything to "source_data"
-        zip_ref.extractall(source_data)
-
-
-    db_meta = {"project_name": project_name,
-               "pdf_loader": pdf_loader.__name__, "chunk_size": chunk_size,
-               "chunk_overlap": chunk_overlap,
-               "embedding_model": embeddings_str,
-               "files": os.listdir(source_data),
-               "source_path": source_data}
-    with open(os.path.join(project_path, "db_meta.json"), "w", encoding="utf-8") as f:
-        # save db_meta.json to folder
-        json.dump(db_meta, f)
-
-
-    all_docs = []
-    for ext in [".txt", ".tex", ".md", ".pdf"]:
-        if ext in [".txt", ".tex", ".md"]:
-            loader = DirectoryLoader(source_data, glob=f"**/*{ext}", loader_cls=TextLoader,
-                                     loader_kwargs={'autodetect_encoding': True})
-        elif ext in [".pdf"]:
-            loader = DirectoryLoader(source_data, glob=f"**/*{ext}", loader_cls=pdf_loader)
-        else:
-            continue
-        docs = loader.load()
-        all_docs = all_docs + docs
-
-    # split the files into chunks, evaluate their embeddings, and save all results into embeddings
-    chunks = get_chunks(all_docs, chunk_size, chunk_overlap)
-    text_embeddings = embeddings.embed_documents(chunks)
-    text_embedding_pairs = list(zip(chunks, text_embeddings))
-    embeddings_save_to = os.path.join(embeddings_data, 'text_embedding_pairs.pickle')
-    with open(embeddings_save_to, 'wb') as handle:
-        pickle.dump(text_embedding_pairs, handle, protocol=pickle.HIGHEST_PROTOCOL)
-    db = FAISS.from_embeddings(text_embedding_pairs, embeddings)
-
-    db.save_local(index_data)
-    print(db_meta)
-    print("Success!")
-    return db, project_name, db_meta
-
-
-def find_file(file_name, directory):
-    for root, dirs, files in os.walk(directory):
-        if file_name in files:
-            return os.path.join(root, file_name)
-    return None  # If the file was not found
-
-def find_file_dir(file_name, directory):
-    for root, dirs, files in os.walk(directory):
-        if file_name in files:
-            return root  # return the directory instead of the full path
-    return None  # If the file was not found
-
-
-def load_faiss_index_from_zip(path_to_zip_file):
-    # Extract the zip file. Read the db_meta
-    # base_name = os.path.basename(path_to_zip_file)
-    path_to_extract = os.path.join(os.getcwd())
-    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
-        zip_ref.extractall(path_to_extract)
-
-    db_meta_json = find_file("db_meta.json", path_to_extract)
-    if db_meta_json is not None:
-        with open(db_meta_json, "r", encoding="utf-8") as f:
-            db_meta_dict = json.load(f)
-    else:
-        raise ValueError("Cannot find `db_meta.json` in the .zip file.")
-
-    try:
-        embeddings = EMBEDDINGS_MAPPING[db_meta_dict["embedding_model"]]
-    except KeyError:
-        from langchain.embeddings.openai import OpenAIEmbeddings
-        embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
-
-    # locate index.faiss
-    index_path = find_file_dir("index.faiss", path_to_extract)
-    if index_path is not None:
-        db = FAISS.load_local(index_path, embeddings)
-        return db
-    else:
-        raise ValueError("Failed to find `index.faiss` in the .zip file.")
-
-
-if __name__ == "__main__":
-    from langchain.document_loaders import PyPDFLoader
-    from langchain.embeddings.openai import OpenAIEmbeddings
-    from langchain.embeddings import HuggingFaceEmbeddings
-
-    model_name = "sebastian-hofstaetter/distilbert-dot-tas_b-b256-msmarco"
-    model_kwargs = {'device': 'cpu'}
-    encode_kwargs = {'normalize_embeddings': False}
-    embeddings = HuggingFaceEmbeddings(
-        model_name=model_name,
-        model_kwargs=model_kwargs,
-        encode_kwargs=encode_kwargs)
-    create_faiss_index_from_zip(path_to_zip_file="document.zip", pdf_loader=PyPDFLoader, embeddings=embeddings)
diff --git a/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp b/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
deleted file mode 100644
index c94575903bdf2eef71ecbe66382375552446e510..0000000000000000000000000000000000000000
--- a/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "libipc/pool_alloc.h"
-
-#include "libipc/memory/resource.h"
-
-namespace ipc {
-namespace mem {
-
-void* pool_alloc::alloc(std::size_t size) {
-    return async_pool_alloc::alloc(size);
-}
-
-void pool_alloc::free(void* p, std::size_t size) {
-    async_pool_alloc::free(p, size);
-}
-
-} // namespace mem
-} // namespace ipc
diff --git a/spaces/shencc/gpt/crazy_functions/test_project/cpp/longcode/jpge.cpp b/spaces/shencc/gpt/crazy_functions/test_project/cpp/longcode/jpge.cpp
deleted file mode 100644
index 2e26b71ed5aad0d46478fdbcd3a880be1401f946..0000000000000000000000000000000000000000
--- a/spaces/shencc/gpt/crazy_functions/test_project/cpp/longcode/jpge.cpp
+++ /dev/null
@@ -1,1049 +0,0 @@
-// jpge.cpp - C++ class for JPEG compression.
-// Public domain, Rich Geldreich <richgel99@gmail.com>
-// v1.01, Dec. 18, 2010 - Initial release
-// v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
-// v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
-// Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
-// v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
-// Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
-// Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
-
-#include "jpge.h"
-
-#include <stdlib.h>
-#include <string.h>
-#if PLATFORM_WINDOWS
-#include <malloc.h>
-#endif
-
-#define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
-#define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
-
-namespace jpge {
-
-static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
-static inline void jpge_free(void *p) { FMemory::Free(p); }
-
-// Various JPEG enums and tables.
-enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 }; -enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 }; - -static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; -static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 }; -static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 }; -static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 }; -static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; -static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d }; -static uint8 s_ac_lum_val[AC_LUM_CODES] = -{ - 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0, - 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49, - 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, - 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5, - 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, - 0xf9,0xfa -}; -static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; -static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; -static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 }; -static uint8 s_ac_chroma_val[AC_CHROMA_CODES] = -{ - 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0, - 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48, - 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, - 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3, - 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, - 0xf9,0xfa -}; - -// Low-level helper functions. 
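// (Editor's note, not part of the original jpge.cpp source:) the tables above are the
// fixed data every baseline JPEG encoder carries: the marker codes from ITU T.81, s_zag
// (the zig-zag scan order), the Annex-K reference luminance/chrominance quantization
// tables, and the default DC/AC Huffman bit-length and value tables. The color constants
// defined below are the BT.601 weights in 16.16 fixed point, e.g. YR = round(0.299 * 65536)
// = 19595, YG = round(0.587 * 65536) = 38470, YB = round(0.114 * 65536) = 7471; the ">> 16"
// in the conversion routines divides the products back down to 8-bit samples.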
-template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
-
-const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
-static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }
-
-static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
-{
-  for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
-  {
-    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
-    pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
-    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
-    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
-  }
-}
-
-static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
-{
-  for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
-    pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
-}
-
-static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
-{
-  for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
-  {
-    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
-    pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
-    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
-    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
-  }
-}
-
-static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
-{
-  for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
-    pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
-}
-
-static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
-{
-  for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
-}
-
-// Forward DCT - DCT derived from jfdctint.
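// (Editor's note, not part of the original jpge.cpp source:) the DCT below also works in
// fixed point: the trig constants are pre-scaled by 2^CONST_BITS = 8192, and
// DCT_DESCALE(x, n) = (x + 2^(n-1)) >> n divides by 2^n with round-to-nearest. The row
// pass keeps ROW_BITS = 2 extra bits of precision; the column pass descales them again.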
-#define CONST_BITS 13
-#define ROW_BITS 2
-#define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
-#define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
-#define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
-  int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
-  int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
-  int32 u1 = DCT_MUL(t12 + t13, 4433); \
-  s2 = u1 + DCT_MUL(t13, 6270); \
-  s6 = u1 + DCT_MUL(t12, -15137); \
-  u1 = t4 + t7; \
-  int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
-  int32 z5 = DCT_MUL(u3 + u4, 9633); \
-  t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
-  t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
-  u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
-  u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
-  u3 += z5; u4 += z5; \
-  s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
-
-static void DCT2D(int32 *p)
-{
-  int32 c, *q = p;
-  for (c = 7; c >= 0; c--, q += 8)
-  {
-    int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
-    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
-    q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
-    q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
-  }
-  for (q = p, c = 7; c >= 0; c--, q++)
-  {
-    int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
-    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
-    q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
-    q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
-  }
-}
-
-struct sym_freq { uint m_key, m_sym_index; };
-
-// Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
-static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
-{
-  const uint cMaxPasses = 4;
-  uint32 hist[256 * cMaxPasses]; clear_obj(hist);
-  for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
-  sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
-  uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
-  for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
-  {
-    const uint32* pHist = &hist[pass << 8];
-    uint offsets[256], cur_ofs = 0;
-    for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
-    for (uint i = 0; i < num_syms; i++)
-      pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
-    sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
-  }
-  return pCur_syms;
-}
-
-// calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
-static void calculate_minimum_redundancy(sym_freq *A, int n)
-{
-  int root, leaf, next, avbl, used, dpth;
-  if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
-  A[0].m_key += A[1].m_key; root = 0; leaf = 2;
-  for (next=1; next < n-1; next++)
-  {
-    if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
-    if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
-  }
-  A[n-2].m_key = 0;
-  for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
-  avbl = 1; used = dpth = 0; root = n-2; next = n-1;
-  while (avbl>0)
-  {
-    while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
-    while (avbl>used) { A[next--].m_key = dpth; avbl--; }
-    avbl = 2*used; dpth++; used = 0;
-  }
-}
-
-// Limits canonical Huffman code table's max code size to max_code_size.
-static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
-{
-  if (code_list_len <= 1) return;
-
-  for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
-
-  uint32 total = 0;
-  for (int i = max_code_size; i > 0; i--)
-    total += (((uint32)pNum_codes[i]) << (max_code_size - i));
-
-  while (total != (1UL << max_code_size))
-  {
-    pNum_codes[max_code_size]--;
-    for (int i = max_code_size - 1; i > 0; i--)
-    {
-      if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
-    }
-    total--;
-  }
-}
-
-// Generates an optimized Huffman table.
-void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
-{
-  sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
-  syms0[0].m_key = 1; syms0[0].m_sym_index = 0;  // dummy symbol, assures that no valid code contains all 1's
-  int num_used_syms = 1;
-  const uint32 *pSym_count = &m_huff_count[table_num][0];
-  for (int i = 0; i < table_len; i++)
-    if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
-  sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
-  calculate_minimum_redundancy(pSyms, num_used_syms);
-
-  // Count the # of symbols of each code size.
-  int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
-  for (int i = 0; i < num_used_syms; i++)
-    num_codes[pSyms[i].m_key]++;
-
-  const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
-  huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
-
-  // Compute m_huff_bits array, which contains the # of symbols per code size.
-  clear_obj(m_huff_bits[table_num]);
-  for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
-    m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
-
-  // Remove the dummy symbol added above, which must be in largest bucket.
-  for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
-  {
-    if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
-  }
-
-  // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
-  for (int i = num_used_syms - 1; i >= 1; i--)
-    m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
-}
-
-// JPEG marker generation.
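// (Editor's note, not part of the original jpge.cpp source:) each JPEG marker below is
// emitted as an 0xFF byte followed by its one-byte code, and the 16-bit length word
// written after a marker counts itself (2 bytes) plus the payload, which is why the
// emit_* routines all call emit_word(payload + 2 + ...).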
-void jpeg_encoder::emit_byte(uint8 i)
-{
-  m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
-}
-
-void jpeg_encoder::emit_word(uint i)
-{
-  emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
-}
-
-void jpeg_encoder::emit_marker(int marker)
-{
-  emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
-}
-
-// Emit JFIF marker
-void jpeg_encoder::emit_jfif_app0()
-{
-  emit_marker(M_APP0);
-  emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
-  emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
-  emit_byte(0);
-  emit_byte(1); /* Major version */
-  emit_byte(1); /* Minor version */
-  emit_byte(0); /* Density unit */
-  emit_word(1);
-  emit_word(1);
-  emit_byte(0); /* No thumbnail image */
-  emit_byte(0);
-}
-
-// Emit quantization tables
-void jpeg_encoder::emit_dqt()
-{
-  for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
-  {
-    emit_marker(M_DQT);
-    emit_word(64 + 1 + 2);
-    emit_byte(static_cast<uint8>(i));
-    for (int j = 0; j < 64; j++)
-      emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
-  }
-}
-
-// Emit start of frame marker
-void jpeg_encoder::emit_sof()
-{
-  emit_marker(M_SOF0); /* baseline */
-  emit_word(3 * m_num_components + 2 + 5 + 1);
-  emit_byte(8); /* precision */
-  emit_word(m_image_y);
-  emit_word(m_image_x);
-  emit_byte(m_num_components);
-  for (int i = 0; i < m_num_components; i++)
-  {
-    emit_byte(static_cast<uint8>(i + 1)); /* component ID */
-    emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
-    emit_byte(i > 0); /* quant. table num */
-  }
-}
-
-// Emit Huffman table.
-void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
-{
-  emit_marker(M_DHT);
-
-  int length = 0;
-  for (int i = 1; i <= 16; i++)
-    length += bits[i];
-
-  emit_word(length + 2 + 1 + 16);
-  emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
-
-  for (int i = 1; i <= 16; i++)
-    emit_byte(bits[i]);
-
-  for (int i = 0; i < length; i++)
-    emit_byte(val[i]);
-}
-
-// Emit all Huffman tables.
-void jpeg_encoder::emit_dhts()
-{
-  emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
-  emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
-  if (m_num_components == 3)
-  {
-    emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
-    emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
-  }
-}
-
-// emit start of scan
-void jpeg_encoder::emit_sos()
-{
-  emit_marker(M_SOS);
-  emit_word(2 * m_num_components + 2 + 1 + 3);
-  emit_byte(m_num_components);
-  for (int i = 0; i < m_num_components; i++)
-  {
-    emit_byte(static_cast<uint8>(i + 1));
-    if (i == 0)
-      emit_byte((0 << 4) + 0);
-    else
-      emit_byte((1 << 4) + 1);
-  }
-  emit_byte(0); /* spectral selection */
-  emit_byte(63);
-  emit_byte(0);
-}
-
-// Emit all markers at beginning of image file.
-void jpeg_encoder::emit_markers()
-{
-  emit_marker(M_SOI);
-  emit_jfif_app0();
-  emit_dqt();
-  emit_sof();
-  emit_dhts();
-  emit_sos();
-}
-
-// Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
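// (Editor's note, not part of the original jpge.cpp source:) the routine below performs
// canonical Huffman code assignment: within one code length, codes are consecutive
// integers; moving to the next length left-shifts the running code by one bit. Only the
// per-length symbol counts (bits[]) and the ordered symbol values (val[]) need to be
// stored in the file, which is exactly what the DHT marker carries.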
-void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
-{
-  int i, l, last_p, si;
-  uint8 huff_size[257];
-  uint huff_code[257];
-  uint code;
-
-  int p = 0;
-  for (l = 1; l <= 16; l++)
-    for (i = 1; i <= bits[l]; i++)
-      huff_size[p++] = (char)l;
-
-  huff_size[p] = 0; last_p = p; // write sentinel
-
-  code = 0; si = huff_size[0]; p = 0;
-
-  while (huff_size[p])
-  {
-    while (huff_size[p] == si)
-      huff_code[p++] = code++;
-    code <<= 1;
-    si++;
-  }
-
-  memset(codes, 0, sizeof(codes[0])*256);
-  memset(code_sizes, 0, sizeof(code_sizes[0])*256);
-  for (p = 0; p < last_p; p++)
-  {
-    codes[val[p]] = huff_code[p];
-    code_sizes[val[p]] = huff_size[p];
-  }
-}
-
-// Quantization table generation.
-void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
-{
-  int32 q;
-  if (m_params.m_quality < 50)
-    q = 5000 / m_params.m_quality;
-  else
-    q = 200 - m_params.m_quality * 2;
-  for (int i = 0; i < 64; i++)
-  {
-    int32 j = *pSrc++; j = (j * q + 50L) / 100L;
-    *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
-  }
-}
-
-// Higher-level methods.
-void jpeg_encoder::first_pass_init()
-{
-  m_bit_buffer = 0; m_bits_in = 0;
-  memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
-  m_mcu_y_ofs = 0;
-  m_pass_num = 1;
-}
-
-bool jpeg_encoder::second_pass_init()
-{
-  compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
-  compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
-  if (m_num_components > 1)
-  {
-    compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
-    compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
-  }
-  first_pass_init();
-  emit_markers();
-  m_pass_num = 2;
-  return true;
-}
-
-bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
-{
-  m_num_components = 3;
-  switch (m_params.m_subsampling)
-  {
-    case Y_ONLY:
-    {
-      m_num_components = 1;
-      m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
-      m_mcu_x = 8; m_mcu_y = 8;
-      break;
-    }
-    case H1V1:
-    {
-      m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
-      m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
-      m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
-      m_mcu_x = 8; m_mcu_y = 8;
-      break;
-    }
-    case H2V1:
-    {
-      m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
-      m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
-      m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
-      m_mcu_x = 16; m_mcu_y = 8;
-      break;
-    }
-    case H2V2:
-    {
-      m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
-      m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
-      m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
-      m_mcu_x = 16; m_mcu_y = 16;
-    }
-  }
-
-  m_image_x = p_x_res; m_image_y = p_y_res;
-  m_image_bpp = src_channels;
-  m_image_bpl = m_image_x * src_channels;
-  m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
-  m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
-  m_image_bpl_xlt = m_image_x * m_num_components;
-  m_image_bpl_mcu = m_image_x_mcu * m_num_components;
-  m_mcus_per_row = m_image_x_mcu / m_mcu_x;
-
-  if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
-  for (int i = 1; i < m_mcu_y; i++)
-    m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
-
-  compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
-  compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ?
s_std_lum_quant : s_std_croma_quant); - - m_out_buf_left = JPGE_OUT_BUF_SIZE; - m_pOut_buf = m_out_buf; - - if (m_params.m_two_pass_flag) - { - clear_obj(m_huff_count); - first_pass_init(); - } - else - { - memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES); - memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES); - memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES); - memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES); - if (!second_pass_init()) return false; // in effect, skip over the first pass - } - return m_all_stream_writes_succeeded; -} - -void jpeg_encoder::load_block_8_8_grey(int x) -{ - uint8 *pSrc; - sample_array_t *pDst = m_sample_array; - x <<= 3; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc = m_mcu_lines[i] + x; - pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128; - pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128; - } -} - -void jpeg_encoder::load_block_8_8(int x, int y, int c) -{ - uint8 *pSrc; - sample_array_t *pDst = m_sample_array; - x = (x * (8 * 3)) + c; - y <<= 3; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc = m_mcu_lines[y + i] + x; - pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128; - pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128; - } -} - -void jpeg_encoder::load_block_16_8(int x, int c) -{ - uint8 *pSrc1, *pSrc2; - sample_array_t *pDst = m_sample_array; - x = (x * (16 * 3)) + c; - int a = 0, b = 2; - for (int i = 0; i < 16; i += 2, pDst += 8) - { - pSrc1 = m_mcu_lines[i + 0] + x; - pSrc2 = m_mcu_lines[i + 1] + x; - pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128; - pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128; - pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128; - pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128; - int temp = a; a = b; b = temp; - } -} - -void jpeg_encoder::load_block_16_8_8(int x, int c) -{ - uint8 *pSrc1; - sample_array_t *pDst = m_sample_array; - x = (x * (16 * 3)) + c; - for (int i = 0; i < 8; i++, pDst += 8) - { - pSrc1 = m_mcu_lines[i + 0] + x; - pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128; - pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128; - pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128; - pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128; - } -} - -void jpeg_encoder::load_quantized_coefficients(int component_num) -{ - int32 *q = m_quantization_tables[component_num > 0]; - int16 *pDst = m_coefficient_array; - for 
(int i = 0; i < 64; i++)
-  {
-    sample_array_t j = m_sample_array[s_zag[i]];
-    if (j < 0)
-    {
-      if ((j = -j + (*q >> 1)) < *q)
-        *pDst++ = 0;
-      else
-        *pDst++ = static_cast<int16>(-(j / *q));
-    }
-    else
-    {
-      if ((j = j + (*q >> 1)) < *q)
-        *pDst++ = 0;
-      else
-        *pDst++ = static_cast<int16>((j / *q));
-    }
-    q++;
-  }
-}
-
-void jpeg_encoder::flush_output_buffer()
-{
-  if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
-    m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
-  m_pOut_buf = m_out_buf;
-  m_out_buf_left = JPGE_OUT_BUF_SIZE;
-}
-
-void jpeg_encoder::put_bits(uint bits, uint len)
-{
-  m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
-  while (m_bits_in >= 8)
-  {
-    uint8 c;
-    #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
-    JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
-    if (c == 0xFF) JPGE_PUT_BYTE(0);
-    m_bit_buffer <<= 8;
-    m_bits_in -= 8;
-  }
-}
-
-void jpeg_encoder::code_coefficients_pass_one(int component_num)
-{
-  if (component_num >= 3) return; // just to shut up static analysis
-  int i, run_len, nbits, temp1;
-  int16 *src = m_coefficient_array;
-  uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
-
-  temp1 = src[0] - m_last_dc_val[component_num];
-  m_last_dc_val[component_num] = src[0];
-  if (temp1 < 0) temp1 = -temp1;
-
-  nbits = 0;
-  while (temp1)
-  {
-    nbits++; temp1 >>= 1;
-  }
-
-  dc_count[nbits]++;
-  for (run_len = 0, i = 1; i < 64; i++)
-  {
-    if ((temp1 = m_coefficient_array[i]) == 0)
-      run_len++;
-    else
-    {
-      while (run_len >= 16)
-      {
-        ac_count[0xF0]++;
-        run_len -= 16;
-      }
-      if (temp1 < 0) temp1 = -temp1;
-      nbits = 1;
-      while (temp1 >>= 1) nbits++;
-      ac_count[(run_len << 4) + nbits]++;
-      run_len = 0;
-    }
-  }
-  if (run_len) ac_count[0]++;
-}
-
-void jpeg_encoder::code_coefficients_pass_two(int component_num)
-{
-  int i, j, run_len, nbits, temp1, temp2;
-  int16 *pSrc = m_coefficient_array;
-  uint *codes[2];
-  uint8 *code_sizes[2];
-
-  if (component_num == 0)
-  {
-    codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
-    code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
-  }
-  else
-  {
-    codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
-    code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
-  }
-
-  temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
-  m_last_dc_val[component_num] = pSrc[0];
-
-  if (temp1 < 0)
-  {
-    temp1 = -temp1; temp2--;
-  }
-
-  nbits = 0;
-  while (temp1)
-  {
-    nbits++; temp1 >>= 1;
-  }
-
-  put_bits(codes[0][nbits], code_sizes[0][nbits]);
-  if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
-
-  for (run_len = 0, i = 1; i < 64; i++)
-  {
-    if ((temp1 = m_coefficient_array[i]) == 0)
-      run_len++;
-    else
-    {
-      while (run_len >= 16)
-      {
-        put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
-        run_len -= 16;
-      }
-      if ((temp2 = temp1) < 0)
-      {
-        temp1 = -temp1;
-        temp2--;
-      }
-      nbits = 1;
-      while (temp1 >>= 1)
-        nbits++;
-      j = (run_len << 4) + nbits;
-      put_bits(codes[1][j], code_sizes[1][j]);
-      put_bits(temp2 & ((1 << nbits) - 1), nbits);
-      run_len = 0;
-    }
-  }
-  if (run_len)
-    put_bits(codes[1][0], code_sizes[1][0]);
-}
-
-void jpeg_encoder::code_block(int component_num)
-{
-  DCT2D(m_sample_array);
-  load_quantized_coefficients(component_num);
-  if (m_pass_num == 1)
-    code_coefficients_pass_one(component_num);
-  else
-    code_coefficients_pass_two(component_num);
-}
-
-void jpeg_encoder::process_mcu_row()
-{
-  if (m_num_components == 1)
-  {
-    for (int i = 0; i < m_mcus_per_row; i++)
-    {
-      load_block_8_8_grey(i); code_block(0);
-    }
-  }
-  else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
-  {
-    for (int i = 0; i < m_mcus_per_row; i++)
-    {
-      load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
-    }
-  }
-  else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
-  {
-    for (int i = 0; i < m_mcus_per_row; i++)
-    {
-      load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
-      load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
-    }
-  }
-  else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
-  {
-    for (int i = 0; i < m_mcus_per_row; i++)
-    {
-      load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
-      load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
-      load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
-    }
-  }
-}
-
-bool jpeg_encoder::terminate_pass_one()
-{
-  optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
-  if (m_num_components > 1)
-  {
-    optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
-  }
-  return second_pass_init();
-}
-
-bool jpeg_encoder::terminate_pass_two()
-{
-  put_bits(0x7F, 7);
-  flush_output_buffer();
-  emit_marker(M_EOI);
-  m_pass_num++; // purposely bump up m_pass_num, for debugging
-  return true;
-}
-
-bool jpeg_encoder::process_end_of_image()
-{
-  if (m_mcu_y_ofs)
-  {
-    if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
-    {
-      for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
-        memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
-    }
-
-    process_mcu_row();
-  }
-
-  if (m_pass_num == 1)
-    return terminate_pass_one();
-  else
-    return terminate_pass_two();
-}
-
-void jpeg_encoder::load_mcu(const void *pSrc)
-{
-  const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
-
-  uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
-
-  if (m_num_components == 1)
-  {
-    if (m_image_bpp == 4)
-      RGBA_to_Y(pDst, Psrc, m_image_x);
-    else if (m_image_bpp == 3)
-      RGB_to_Y(pDst, Psrc, m_image_x);
-    else
-      memcpy(pDst, Psrc, m_image_x);
-  }
-  else
-  {
-    if (m_image_bpp == 4)
-      RGBA_to_YCC(pDst, Psrc, m_image_x);
-    else if (m_image_bpp == 3)
-      RGB_to_YCC(pDst, Psrc, m_image_x);
-    else
-      Y_to_YCC(pDst, Psrc, m_image_x);
-  }
-
-  // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
-  if (m_num_components == 1)
-    memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
-  else
-  {
-    const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
-    uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
-    for (int i = m_image_x; i < m_image_x_mcu; i++)
-    {
-      *q++ = y; *q++ = cb; *q++ = cr;
-    }
-  }
-
-  if (++m_mcu_y_ofs == m_mcu_y)
-  {
-    process_mcu_row();
-    m_mcu_y_ofs = 0;
-  }
-}
-
-void jpeg_encoder::clear()
-{
-  m_mcu_lines[0] = NULL;
-  m_pass_num = 0;
-  m_all_stream_writes_succeeded = true;
-}
-
-jpeg_encoder::jpeg_encoder()
-{
-  clear();
-}
-
-jpeg_encoder::~jpeg_encoder()
-{
-  deinit();
-}
-
-bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
-{
-  deinit();
-  if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
-  m_pStream = pStream;
-  m_params = comp_params;
-  return jpg_open(width, height, src_channels);
-}
-
-void jpeg_encoder::deinit()
-{
-  jpge_free(m_mcu_lines[0]);
-  clear();
-}
-
-bool jpeg_encoder::process_scanline(const void* pScanline)
-{
-  if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
-  if (m_all_stream_writes_succeeded)
-  {
-    if (!pScanline)
-    {
-      if (!process_end_of_image()) return false;
-    }
-    else
-    {
-      load_mcu(pScanline);
-    }
-  }
-  return m_all_stream_writes_succeeded;
-}
-
-// Higher level wrappers/examples (optional).
-#include <stdio.h>
-
-class cfile_stream : public output_stream
-{
-   cfile_stream(const cfile_stream &);
-   cfile_stream &operator= (const cfile_stream &);
-
-   FILE* m_pFile;
-   bool m_bStatus;
-
-public:
-   cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
-
-   virtual ~cfile_stream()
-   {
-      close();
-   }
-
-   bool open(const char *pFilename)
-   {
-      close();
-#if defined(_MSC_VER)
-      if (fopen_s(&m_pFile, pFilename, "wb") != 0)
-      {
-         return false;
-      }
-#else
-      m_pFile = fopen(pFilename, "wb");
-#endif
-      m_bStatus = (m_pFile != NULL);
-      return m_bStatus;
-   }
-
-   bool close()
-   {
-      if (m_pFile)
-      {
-         if (fclose(m_pFile) == EOF)
-         {
-            m_bStatus = false;
-         }
-         m_pFile = NULL;
-      }
-      return m_bStatus;
-   }
-
-   virtual bool put_buf(const void* pBuf, int64_t len)
-   {
-      m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
-      return m_bStatus;
-   }
-
-   uint get_size() const
-   {
-      return m_pFile ? ftell(m_pFile) : 0;
-   }
-};
-
-// Writes JPEG image to file.
-bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
-{
-  cfile_stream dst_stream;
-  if (!dst_stream.open(pFilename))
-    return false;
-
-  jpge::jpeg_encoder dst_image;
-  if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
-    return false;
-
-  for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
-  {
-    for (int64_t i = 0; i < height; i++)
-    {
-      // i, width, and num_channels are all 64bit
-      const uint8* pBuf = pImage_data + i * width * num_channels;
-      if (!dst_image.process_scanline(pBuf))
-        return false;
-    }
-    if (!dst_image.process_scanline(NULL))
-      return false;
-  }
-
-  dst_image.deinit();
-
-  return dst_stream.close();
-}
-
-class memory_stream : public output_stream
-{
-   memory_stream(const memory_stream &);
-   memory_stream &operator= (const memory_stream &);
-
-   uint8 *m_pBuf;
-   uint64_t m_buf_size, m_buf_ofs;
-
-public:
-   memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
-
-   virtual ~memory_stream() { }
-
-   virtual bool put_buf(const void* pBuf, int64_t len)
-   {
-      uint64_t buf_remaining = m_buf_size - m_buf_ofs;
-      if ((uint64_t)len > buf_remaining)
-         return false;
-      memcpy(m_pBuf + m_buf_ofs, pBuf, len);
-      m_buf_ofs += len;
-      return true;
-   }
-
-   uint64_t get_size() const
-   {
-      return m_buf_ofs;
-   }
-};
-
-bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
-{
-  if ((!pDstBuf) || (!buf_size))
-    return false;
-
-  memory_stream dst_stream(pDstBuf, buf_size);
-
-  buf_size = 0;
-
-  jpge::jpeg_encoder dst_image;
-  if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
-    return false;
-
-  for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
-  {
-    for (int64_t i = 0; i < height; i++)
-    {
-      const uint8* pScanline = pImage_data + i * width * num_channels;
-      if (!dst_image.process_scanline(pScanline))
-        return false;
-    }
-    if (!dst_image.process_scanline(NULL))
-      return false;
-  }
-
-  dst_image.deinit();
-
-  buf_size = dst_stream.get_size();
-  return true;
-}
-
-} // namespace jpge
\ No newline at end of file
diff --git a/spaces/shencc/gpt/docs/README_RS.md b/spaces/shencc/gpt/docs/README_RS.md
deleted file mode 100644
index f8d925a27a6e5a19304db6f6d266e3bb3163172f..0000000000000000000000000000000000000000
--- a/spaces/shencc/gpt/docs/README_RS.md
+++ /dev/null
@@ -1,291 +0,0 @@
-> **Note**
->
-> This README is generated automatically by this project's own markdown translation module and may not be 100% correct.
->
-
-# ChatGPT Academic Optimization
-
-**If you like this project, please give it a star. If you have come up with more useful academic shortcuts or function plugins, feel free to open an issue or a pull request. We also have an [English README](docs/README_EN.md) translated by this very project.**
-
-> **Note**
->
-> 1. Please note that only function plugins (buttons) marked in **red** can read files, and some of them are located in the **drop-down menu** of the plugin area. In addition, we welcome any new plugins and handle them with the **highest priority**!
->
-> 2. The functions of each file in this project are described in detail in the self-analysis report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As the project iterates, you can also regenerate an updated function report at any time by clicking the corresponding function plugin to call GPT. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
-
-Function | Description
---- | ---
-One-click polishing | Supports one-click polishing and one-click search for grammar mistakes in academic papers
-One-click English-Chinese translation | Switch between English and Chinese with one click
-One-click code explanation | Correctly displays and explains program code.
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-[Proxy server configuration](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports proxy server configuration
-Modular design | Supports custom high-order function plugins and function plugins with [hot-reload](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) support
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click reading](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's own source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One click to analyze the tree of another Python/C/C++/Java/Lua/... project
-Paper reading | [Function plugin] One click to read the full LaTeX text of a paper and generate an abstract
-Full LaTeX paper translation and polishing | [Function plugin] Translate or polish a LaTeX paper at the press of a button
-Batch comment generation | [Function plugin] One click to generate function comments in batch
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after a run
-[arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the URL of an arxiv article to translate the abstract and download the PDF with one click
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts the paper title and abstract and translates the full text (multithreaded)
-[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Let GPT pick interesting articles for you on any Google Scholar search page.
-Formula/image/table display | Shows both the tex form and the rendered form of formulas at the same time; supports formulas and code highlighting
-Multithreaded function plugin support | Supports multithreaded plugin calls; process huge amounts of text or whole programs with one click
-Dark gradio theme, see [details](https://github.com/binary-husky/chatgpt_academic/issues/173) | Append /?__dark-theme=true to the end of the browser URL to switch to the dark theme.
-[Multiple LLM models supported](https://www.bilibili.com/video/BV1wT411p7yf), API2D supported | Being served by GPT3.5, GPT4 and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time must feel great, right?
-Huggingface alternative that needs no VPN, [online experiment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Log in and copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
-…… | ……
-
-- New interface (you can change the LAYOUT setting in config.py to switch between the "horizontal layout" and the "vertical layout")
-
-You are a professional academic paper translator.
-
-- All buttons are generated dynamically by reading functional.py, so custom functions can be added freely, freeing up the clipboard.
-- Proofreading/polishing
-- If the output contains formulas, they are displayed in both tex form and rendered form at the same time, for convenient copying and reading.
-- Too lazy to look at the project code? Just show the whole project to chatgpt.
-- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
-Multiple large language models are mixed together in the [huggingface beta version](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm).
-
-
----
-
-## Installation - Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-2. Configure the API_KEY and proxy settings
-
-In `config.py`, configure the overseas proxy and the OpenAI API KEY, explained below
-```
-1. If you are located in China, you need to set up an overseas proxy to use the OpenAI API. Please read config.py carefully for instructions (1. Change USE_PROXY to True; 2. Configure the proxy according to the instructions).
-2. Configure the OpenAI API KEY. You need to register on the OpenAI website and obtain an API KEY. Once you have the API KEY, set it in config.py.
-3. Issues related to network problems (network timeouts, proxy not working) are collected here: https://github.com/binary-husky/chatgpt_academic/issues/1
-```
-(Note: at startup, the program checks for a private configuration file named `config_private.py` and uses its settings to override the same-named settings in `config.py`. So if you understand how our configuration is read, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into `config_private.py`. `config_private.py` is not tracked by git, which keeps your private information safer.)
-
-
-3. Install the dependencies
-```sh
-# (Option 1) Recommended
-python -m pip install -r requirements.txt
-
-# (Option 2) If you use anaconda, the steps are similar:
-# (Step 2.1) conda create -n gptac_venv python=3.11
-# (Step 2.2) conda activate gptac_venv
-# (Step 2.3) python -m pip install -r requirements.txt
-
-# Note: use the official pip source or the pip.aliyun.com source. Other pip sources may cause problems. Temporary way to switch sources:
-# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-```
-
-If Tsinghua ChatGLM support is needed, additional dependencies must be installed (prerequisites: comfortable with python and a sufficiently powerful machine):
-```sh
-python -m pip install -r request_llm/requirements_chatglm.txt
-```
-
-4. Run it
-```sh
-python main.py
-```
-
-5. Test the function plugins
-```
-- Test Python project analysis
-    In the main input area enter `./crazy_functions/test_project/python/dqn`, then click "Analyze entire Python project"
-- Test self code reading
-    Click "[Multithreading demo] Analyze this project itself (decode the source code)"
-- Test the template function plugin (you can use this function as a template for more complex functions that require gpt to answer what happened in history on this day)
-    Click "[Template plugin demo] On this day in history"
-- More functions to choose from in the drop-down panel at the bottom
-```
-
-## Installation - Method 2: Use docker (Linux)
-
-
-1. ChatGPT only (recommended for most users):
-``` sh
-# Download the project
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-# Configure the overseas proxy and the OpenAI API KEY
-Edit the file config.py in any text editor.
-# Build
-docker build -t gpt-academic .
-# Run
-docker run --rm -it --net=host gpt-academic
-
-# Test the function plugins
-## Test the template function plugin (it asks gpt what happened "on this day in history"); you can use this function as a template to implement more complex functions.
-Click "[Template plugin demo] On this day in history".
-## Test abstract summarization for a LaTeX project
-In the input area enter ./crazy_functions/test_project/latex/attention, then click "Read LaTeX paper and generate abstract".
-## Test Python project analysis
-In the input area enter ./crazy_functions/test_project/python/dqn, then click "Analyze entire Python project".
-
-Choose more function plugins from the drop-down menu at the bottom.
-```
-
-2. ChatGPT + ChatGLM (requires solid familiarity with Docker and sufficiently powerful hardware):
-
-``` sh
-# Edit the Dockerfile
-cd docs && nano Dockerfile+ChatGLM
-# How to build (Dockerfile+ChatGLM is under the docs path, first go to the folder with cd docs)
-docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
-# How to run (2): I want to enter the container and adjust a few things before launching:
-docker run --rm -it --net=host --gpus=all gpt-academic bash
-```
-
-
-## Installation - Method 3: Other deployment options
-
-1. Deployment on a remote cloud server
-Please visit [Deploy Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-2. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deploy Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-
-## Installation - Proxy configuration
-### Method 1: The usual way
-[Proxy configuration](https://github.com/binary-husky/chatgpt_academic/issues/1)
-
-### Method 2: Beginner's guide
-[Beginner's guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
-
-
----
-
-## Adding a new convenience button (custom shortcut keys for academic work)
-Open `core_functional.py` in any text editor, add an entry as shown below, then restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix support hot modification and take effect without restarting the program.)
-For example
-```
-"Super Eng-Rus": {
-    # Prefix: added before your input. E.g. used to describe your request, such as translating, explaining code, polishing, etc.
-    "Prefix": "Please translate the following passage into Russian, and then create a step-by-step markdown table to explain all the specialized terms that appear in the text:\n\n",
-
-    # Suffix: added after your input. For example, together with the prefix it can wrap your input in quotation marks.
-    "Suffix": "",
-},
-```
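For illustration, an entry like the one above is consumed roughly as sketched below. This is a minimal sketch under stated assumptions: the real project reads these dicts from `core_functional.py`, and the helper name `apply_button` is hypothetical, not the project's actual API.

```python
# Minimal sketch (hypothetical helper, not the project's verbatim code):
# a button's "Prefix" and "Suffix" are simply concatenated around the user's
# input before the combined prompt is sent to the model.
def apply_button(entry: dict, user_input: str) -> str:
    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")
```

Because only string concatenation is involved, editing a button's Prefix or Suffix takes effect immediately, which is why the hot-modification note above holds.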
-
----
-
-
-## Demonstration of some capabilities
-
-### Image display:
-
-
-### If a program can understand and analyze itself:
-
-
-### Analysis of other Python/C++ projects:
-
-### One-click comprehension and abstract generation for LaTeX papers
-
-### Automatic report generation
-
-### Modular function design
-
-### Source code translation into English
-
-
-## Todo and version planning:
-- version 3.2+ (todo): function plugins support richer parameter interfaces
-- version 3.1: support querying multiple gpt models at the same time! Support api2d, support load balancing across multiple apikeys.
-- version 3.0: support for chatglm and other small llms
-- version 2.6: reworked the plugin structure, improved interactivity, added more plugins
-- version 2.5: self-updating; fixed the problem of overly long text and token overflow when translating the source code of a whole project
-- version 2.4: (1) added full PDF translation; (2) added the ability to reposition the input area; (3) added a vertical layout option; (4) optimized the multithreaded function plugins.
-- version 2.3: improved multithreaded interactivity
-- version 2.2: function plugins support hot reloading
-- version 2.1: collapsible layout
-- version 2.0: modular function plugin design
-- version 1.0: basic functions
-
-## References and learning
-
-```
-The code borrows many good design ideas from other excellent projects, including:
-
-# Project 1: borrows many tricks from ChuanhuChatGPT
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 2: Tsinghua's ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-```
-
diff --git a/spaces/shivalk/myfirst/app.py b/spaces/shivalk/myfirst/app.py
deleted file mode 100644
index ef3fdc5714ef1e7e85eae6412a10d6426ca7873e..0000000000000000000000000000000000000000
--- a/spaces/shivalk/myfirst/app.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import streamlit as st
-import pandas as pd
-st.title('A Simple Streamlit Web App')
-name = st.text_input('Enter your name', '')
-st.write(f'Hello {name}!')
-x = st.slider('Select an integer x', 0, 10, 1)
-y = st.slider('Select an integer y', 0, 10, 1)
-df = pd.DataFrame({'x': [x], 'y': [y], 'x + y': [x + y]}, index=['addition row'])
-st.write(df)
\ No newline at end of file
diff --git a/spaces/shivammittal274/LLM_CA/chatWithCache.py b/spaces/shivammittal274/LLM_CA/chatWithCache.py
deleted file mode 100644
index 70bd104e91d7dc4a458fd23310008ebe05d697f6..0000000000000000000000000000000000000000
--- a/spaces/shivammittal274/LLM_CA/chatWithCache.py
+++ /dev/null
@@ -1,134 +0,0 @@
-from os import environ
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Milvus
-from langchain.document_loaders import PyPDFLoader
-import pickle
-import gptcache
-import time
-import numpy as np
-from gptcache.manager import CacheBase, VectorBase, get_data_manager
-from gptcache.embedding import OpenAI as openai_embedding
-import langchain
-from langchain.llms import OpenAI
-from gptcache.adapter.langchain_models import LangChainLLMs
-import langchain
-from langchain.chains.question_answering import load_qa_chain
-from langchain.llms import OpenAI
-from langchain.chains import ConversationalRetrievalChain
-from cosine_sim_eval import CosineSimEvaluation
-from langchain.memory import ConversationBufferMemory
-from query_data import get_chain
-from gptcache import cache
-from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
-from langchain.vectorstores.faiss import FAISS
-from langchain.chat_models import ChatOpenAI
-import os
-from dotenv import load_dotenv
-load_dotenv()
-
-api_key = os.getenv("OPENAI_API_KEY")
-
-
-# loader = PyPDFLoader("/home/ubuntu/shivam/pdf-summ/chapter1.pdf")
pages = loader.load_and_split() -# print (f'You have {len(pages)} page(s) in your data') - -# # We need to split long documents into smaller chunks to stay under the LLM token limit. -# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) -# texts = text_splitter.split_documents(pages) -# print (f'Now you have {len(texts)} documents') -documents = np.load("documents.npy", allow_pickle=True) -embeddings = OpenAIEmbeddings() -# vectorstore = Milvus.from_documents( -# texts, -# embeddings, -# connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT} -# ) -vectorstore = FAISS.from_documents(documents, embeddings) -with open("vectorstore.pkl", "wb") as f: - pickle.dump(vectorstore, f) -print (f'Milvus store created.') - - -#retriever = vector_db.as_retriever(search_type='similarity', search_kwargs={'k':1}) -#chain = RetrievalQAWithSourcesChain.from_chain_type(llm_openai, chain_type="stuff", retriever=retriever) - -def get_content_func(data, **_): - return data.get("prompt").split("Question")[-1] - -print("Cache loading.....") -openai_emb = openai_embedding() -# onnx = Onnx() - -data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=openai_emb.dimension)) -cache.init( - pre_embedding_func=get_content_func, - embedding_func=openai_emb.to_embeddings, - data_manager=data_manager, - similarity_evaluation=CosineSimEvaluation(), - ) -cache.set_openai_key() -# langchain.llm_cache = GPTCache(init_gptcache_map) -llm=ChatOpenAI(model='gpt-4', temperature=0) -chain = get_chain(llm, vectorstore=vectorstore) - -questions = [ - "what's democracy", - "what is democracy ", - "Explain democracy", - # "how democracy is related to elections", - # "What are the different features of democracy", - # "What are elections?", - # "why is government needed", - # "what is your name", - "Is India a democracy", - "What is food", - "why are elections held", - "summary on chapter 1", - "give TL;DR summary of chapter 1", - "How is democracy related to independence?" -] - - -def llm_qa(question, chat_history): - # question += 'Instructions: No need to answer from text necessarily, If question is related to maths, answer using logic.' - start_time = time.time() - docs = (vectorstore.similarity_search(question)) - # response = chain({"question": question, "chat_history": []}) - - mapped_qa = [[{"role": "user", "content": question}, {"role": "assistant", "content": answer}] for question, answer in chat_history] - - # Flatten the list of lists - mapped_qa = [item for sublist in mapped_qa for item in sublist] - - import openai - - openai.api_key =api_key - # question = "What deducations I can claim in India?" - response = (openai.ChatCompletion.create( - model="gpt-4", - messages=[ - {"role": "system", "content": """You are a CA having good knowledge of Indian Income tax rules. I would also try to share important information in context you may require - while answering the question in Context. You can use that information while answering. 
- Example: Context: User's Question: - - And let's say the user asks something for which you require more information: do not assume any of it. Ask the user for the information you need. - Example: if the user asks 'can you compute my HRA component for me', ask for the basic salary, HRA components, and whatever else you require. """}, - *mapped_qa, - {"role": "user", "content": f"Context: {docs} \nQuestion: {question}"} - ] - )) - print(response) - - # response = chain({"question": question}, return_only_outputs=True) - time_taken = time.time() - start_time - print("Time taken: {:.2f}s".format(time_taken)) - return response['choices'][0]['message']['content'], "Time taken: {:.2f}s".format(time_taken) - # print(f'Answer: {response}\n') - -if __name__ == '__main__': - for question in questions: - print(llm_qa(question=question)) \ No newline at end of file diff --git a/spaces/slush0/petals-playground/chat_client.py b/spaces/slush0/petals-playground/chat_client.py deleted file mode 100644 index 41d60bc9262b1b3b4ad5a6448a50f68ebaa1a207..0000000000000000000000000000000000000000 --- a/spaces/slush0/petals-playground/chat_client.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python -import json -import sys - -# pip install websocket-client -import websocket - -class ModelClient(object): - def __init__(self, endpoint_url): - self.endpoint_url = endpoint_url - self.ws = None - self.model = None - - def open_session(self, model, max_length): - self.ws = websocket.create_connection(self.endpoint_url, enable_multithread=True) - self.model = model - payload = { - "type": "open_inference_session", - "model": self.model, - "max_length": max_length, - } - self.ws.send(json.dumps(payload)) - assert json.loads(self.ws.recv())['ok'] - - def is_session(self): - return self.ws is not None - - def close_session(self): - if self.ws: - self.ws.close() - self.ws = None - - def generate(self, prompt, **kwargs): - try: - return self._generate(prompt, **kwargs) - except: - self.close_session() - raise - - def _generate(self, prompt, **kwargs): - payload = { - "type": "generate", - "inputs": prompt, - "max_new_tokens": 1, - "do_sample": 0, - "temperature": 1, - "stop_sequence": "</s>" if "bloomz" in self.model else "\n\n", - } - payload = {**payload, **kwargs} - self.ws.send(json.dumps(payload)) - - while True: - data = json.loads(self.ws.recv()) - if not data['ok']: - raise Exception(data['traceback']) - yield data['outputs'] - if data['stop']: - break - -def main(): - #client = ModelClient("ws://localhost:8000/api/v2/generate") - client = ModelClient("wss://chat.petals.dev/api/v2/generate") - client.open_session("stabilityai/StableBeluga2", 128) - - if len(sys.argv) > 1: - prompt = sys.argv[1] - # Bloomz variant uses </s>
            instead of \n\n as an eos token - if not prompt.endswith("\n\n"): - prompt += "\n\n" - else: - prompt = "The SQL command to extract all the users whose name starts with A is: \n\n" - print(f"Prompt: {prompt}") - - # petals.client.routing.sequence_manager.MissingBlocksError - for out in client.generate(prompt, - do_sample=True, - temperature=0.75, - top_p=0.9): - print(out, end="", flush=True) - - client.close_session() - -if __name__ == '__main__': - main() diff --git a/spaces/smjain/smjainvoice/starganv2vc_paddle/meldataset.py b/spaces/smjain/smjainvoice/starganv2vc_paddle/meldataset.py deleted file mode 100644 index 2302da1926825afa81320643014f911c3086442b..0000000000000000000000000000000000000000 --- a/spaces/smjain/smjainvoice/starganv2vc_paddle/meldataset.py +++ /dev/null @@ -1,155 +0,0 @@ -#coding: utf-8 - -import os -import time -import random -import random -import paddle -import paddleaudio - -import numpy as np -import soundfile as sf -import paddle.nn.functional as F - -from paddle import nn -from paddle.io import DataLoader - -import logging -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) - -np.random.seed(1) -random.seed(1) - -SPECT_PARAMS = { - "n_fft": 2048, - "win_length": 1200, - "hop_length": 300 -} -MEL_PARAMS = { - "n_mels": 80, - "n_fft": 2048, - "win_length": 1200, - "hop_length": 300 -} - -class MelDataset(paddle.io.Dataset): - def __init__(self, - data_list, - sr=24000, - validation=False, - ): - - _data_list = [l[:-1].split('|') for l in data_list] - self.data_list = [(path, int(label)) for path, label in _data_list] - self.data_list_per_class = { - target: [(path, label) for path, label in self.data_list if label == target] \ - for target in list(set([label for _, label in self.data_list]))} - - self.sr = sr - self.to_melspec = paddleaudio.features.MelSpectrogram(**MEL_PARAMS) - self.to_melspec.fbank_matrix[:] = paddle.load(os.path.dirname(__file__) + '/fbank_matrix.pd')['fbank_matrix'] - - self.mean, self.std = -4, 4 - self.validation = validation - self.max_mel_length = 192 - - def __len__(self): - return len(self.data_list) - - def __getitem__(self, idx): - with paddle.fluid.dygraph.guard(paddle.CPUPlace()): - data = self.data_list[idx] - mel_tensor, label = self._load_data(data) - ref_data = random.choice(self.data_list) - ref_mel_tensor, ref_label = self._load_data(ref_data) - ref2_data = random.choice(self.data_list_per_class[ref_label]) - ref2_mel_tensor, _ = self._load_data(ref2_data) - return mel_tensor, label, ref_mel_tensor, ref2_mel_tensor, ref_label - - def _load_data(self, path): - wave_tensor, label = self._load_tensor(path) - - if not self.validation: # random scale for robustness - random_scale = 0.5 + 0.5 * np.random.random() - wave_tensor = random_scale * wave_tensor - - mel_tensor = self.to_melspec(wave_tensor) - mel_tensor = (paddle.log(1e-5 + mel_tensor) - self.mean) / self.std - mel_length = mel_tensor.shape[1] - if mel_length > self.max_mel_length: - random_start = np.random.randint(0, mel_length - self.max_mel_length) - mel_tensor = mel_tensor[:, random_start:random_start + self.max_mel_length] - - return mel_tensor, label - - def _preprocess(self, wave_tensor, ): - mel_tensor = self.to_melspec(wave_tensor) - mel_tensor = (paddle.log(1e-5 + mel_tensor) - self.mean) / self.std - return mel_tensor - - def _load_tensor(self, data): - wave_path, label = data - label = int(label) - wave, sr = sf.read(wave_path) - wave_tensor = paddle.from_numpy(wave).astype(paddle.float32) - return wave_tensor, label - -class 
Collater(object): - """ - Args: - adaptive_batch_size (bool): if true, decrease batch size when long data comes. - """ - - def __init__(self, return_wave=False): - self.text_pad_index = 0 - self.return_wave = return_wave - self.max_mel_length = 192 - self.mel_length_step = 16 - self.latent_dim = 16 - - def __call__(self, batch): - batch_size = len(batch) - nmels = batch[0][0].shape[0] - mels = paddle.zeros((batch_size, nmels, self.max_mel_length)).astype(paddle.float32) - labels = paddle.zeros((batch_size)).astype(paddle.int64) - ref_mels = paddle.zeros((batch_size, nmels, self.max_mel_length)).astype(paddle.float32) - ref2_mels = paddle.zeros((batch_size, nmels, self.max_mel_length)).astype(paddle.float32) - ref_labels = paddle.zeros((batch_size)).astype(paddle.int64) - - for bid, (mel, label, ref_mel, ref2_mel, ref_label) in enumerate(batch): - mel_size = mel.shape[1] - mels[bid, :, :mel_size] = mel - - ref_mel_size = ref_mel.shape[1] - ref_mels[bid, :, :ref_mel_size] = ref_mel - - ref2_mel_size = ref2_mel.shape[1] - ref2_mels[bid, :, :ref2_mel_size] = ref2_mel - - labels[bid] = label - ref_labels[bid] = ref_label - - z_trg = paddle.randn((batch_size, self.latent_dim)) - z_trg2 = paddle.randn((batch_size, self.latent_dim)) - - mels, ref_mels, ref2_mels = mels.unsqueeze(1), ref_mels.unsqueeze(1), ref2_mels.unsqueeze(1) - return mels, labels, ref_mels, ref2_mels, ref_labels, z_trg, z_trg2 - -def build_dataloader(path_list, - validation=False, - batch_size=4, - num_workers=1, - collate_config={}, - dataset_config={}): - - dataset = MelDataset(path_list, validation=validation) - collate_fn = Collater(**collate_config) - data_loader = DataLoader(dataset, - batch_size=batch_size, - shuffle=(not validation), - num_workers=num_workers, - drop_last=(not validation), - collate_fn=collate_fn) - - return data_loader diff --git a/spaces/snoop2head/Gomoku-GPT2/README.md b/spaces/snoop2head/Gomoku-GPT2/README.md deleted file mode 100644 index 046a62419bb25741254b6a4d72080316d49230a6..0000000000000000000000000000000000000000 --- a/spaces/snoop2head/Gomoku-GPT2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gomoku GPT2 -emoji: ⚫️ -colorFrom: gray -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sqc1729/bingi/cloudflare/worker.js b/spaces/sqc1729/bingi/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/sqc1729/bingi/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py deleted file mode 100644 index 07b338dcfd2d7f10317608274631d0edd93ba889..0000000000000000000000000000000000000000 --- 
a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import os -import glob -import argparse -from utils.dedup import deup -import sys - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') - sys.exit(-1) - -def get_directions(folder): - raw_files = glob.glob(f'{folder}/train*') - directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] - return directions - -def diff_list(lhs, rhs): - return set(lhs).difference(set(rhs)) - -def check_diff( - from_src_file, from_tgt_file, - to_src_file, to_tgt_file, -): - seen_in_from = set() - seen_src_in_from = set() - seen_tgt_in_from = set() - from_count = 0 - with open(from_src_file, encoding='utf-8') as fsrc, \ - open(from_tgt_file, encoding='utf-8') as ftgt: - for s, t in zip(fsrc, ftgt): - seen_in_from.add((s, t)) - seen_src_in_from.add(s) - seen_tgt_in_from.add(t) - from_count += 1 - common = 0 - common_src = 0 - common_tgt = 0 - to_count = 0 - seen = set() - - with open(to_src_file, encoding='utf-8') as fsrc, \ - open(to_tgt_file, encoding='utf-8') as ftgt: - for s, t in zip(fsrc, ftgt): - to_count += 1 - if (s, t) not in seen: - if (s, t) in seen_in_from: - common += 1 - if s in seen_src_in_from: - common_src += 1 - seen_src_in_from.remove(s) - if t in seen_tgt_in_from: - common_tgt += 1 - seen_tgt_in_from.remove(t) - seen.add((s, t)) - return common, common_src, common_tgt, from_count, to_count - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--folder", type=str, required=True, - help="the data folder ") - parser.add_argument("--split", type=str, default='test', - help="split (valid, test) to check against training data") - parser.add_argument('--directions', type=str, default=None, required=False) - - args = parser.parse_args() - - if args.directions is None: - directions = set(get_directions(args.folder)) - directions = sorted(directions) - else: - directions = args.directions.split(',') - directions = sorted(set(directions)) - - results = [] - print(f'checking where {args.split} split data are in training') - print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size') - - for direction in directions: - src, tgt = direction.split('-') - from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}' - from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}' - if not os.path.exists(from_src_file): - # some test/valid data might in reverse directinos: - from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}' - from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}' - to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}' - to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}' - if not os.path.exists(to_src_file) or not os.path.exists(from_src_file): - continue - r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file) - results.append(r) - print(f'{direction}\t', '\t'.join(map(str, r))) - - -if __name__ == "__main__": - main() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Film Miss Sundari Full Movie Mp4 Hd.md b/spaces/stomexserde/gpt4-ui/Examples/Download Film Miss Sundari Full Movie 
Mp4 Hd.md deleted file mode 100644 index 3e3c8891096460faf8b5da3dfcb0447326983cbe..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Film Miss Sundari Full Movie Mp4 Hd.md +++ /dev/null @@ -1,22 +0,0 @@ -
            -

            Download Film Miss Sundari Full Movie Mp4 Hd: A Bold and Quirky Drama by Makarand Deshpande

            - -

            If you are looking for a film that explores the complexities of human relationships, emotions and identity, then you might want to download film Miss Sundari full movie mp4 hd. Miss Sundari is a 2013 Hindi drama film directed by Makarand Deshpande, who also plays a pivotal role in the film. The film stars Ahlam Khan, Akash Basnet, Divya Jagdale and Nagesh Bhonsle in the lead roles.

            - -

            The film revolves around Miss Sundari, a young woman who works as a dancer in a bar. She is in love with Raju, a taxi driver, who wants to marry her. However, Miss Sundari has a dark past that haunts her and prevents her from accepting Raju's proposal. She also has a mysterious connection with Makarand, a writer who is obsessed with her. As the film progresses, we get to see the different facets of Miss Sundari's personality and how she deals with her inner conflicts and external pressures.

            -

            Download Film Miss Sundari Full Movie Mp4 Hd


            DOWNLOAD ……… https://urlgoal.com/2uIcgO



            - -

            The film is a bold and quirky attempt by Makarand Deshpande to portray the life of a woman who is trapped in a web of lies, secrets and violence. The film has a nonlinear narrative that keeps the audience engaged and curious. The film also has some powerful performances by the cast, especially Ahlam Khan, who delivers a nuanced and convincing portrayal of Miss Sundari. The film also has some catchy songs composed by Makarand Deshpande himself.

            - -

            If you want to watch this film, you can download film Miss Sundari full movie mp4 hd from Eros Now[^1^], where it is available for streaming. You can also read the critic review of the film by Bollywood Hungama[^3^], which gives it a rating of 2 out of 5 stars. The review praises the performances and the music of the film, but criticizes the screenplay and the direction for being confusing and inconsistent.

            - -

            Download film Miss Sundari full movie mp4 hd today and enjoy this unconventional drama that will make you think and feel.

            - -

            Miss Sundari is not a typical Bollywood film that follows a formulaic plot and a happy ending. It is a film that challenges the norms and stereotypes of society and cinema. It is a film that explores the themes of love, betrayal, identity and freedom. It is a film that shows the harsh realities of life and the choices that people make.

            - -

            The film also showcases the talent and versatility of Makarand Deshpande, who is not only the director and the music composer of the film, but also plays a crucial role in the film. Makarand Deshpande is known for his unconventional and experimental films that often deal with social issues and human psychology. He has also acted in several films and theatre plays, where he has impressed the audiences with his acting skills and charisma.

            - -

            Miss Sundari is a film that deserves to be watched by those who appreciate cinema as an art form and a medium of expression. It is a film that will make you question your own beliefs and values. It is a film that will leave you with a lasting impression.

            -

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/IFoxSoft Photo Collage Platinum 3.0 [Portable] Crack _VERIFIED_.md b/spaces/stomexserde/gpt4-ui/Examples/IFoxSoft Photo Collage Platinum 3.0 [Portable] Crack _VERIFIED_.md deleted file mode 100644 index e612ca9dd5883e377f67f0595d6d55b448995534..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/IFoxSoft Photo Collage Platinum 3.0 [Portable] Crack _VERIFIED_.md +++ /dev/null @@ -1,17 +0,0 @@ - -

            How to Create Amazing Photo Collages with iFoxSoft Photo Collage Platinum 3.0 [Portable]

            -

            If you love making photo collages, you might want to check out iFoxSoft Photo Collage Platinum 3.0 [Portable], a powerful and easy-to-use collage maker that lets you create stunning photo collages, desktop wallpapers, CD and DVD covers, and web graphics in minutes.

            -

            iFoxSoft Photo Collage Platinum 3.0 [Portable] crack


            DOWNLOAD ☆☆☆☆☆ https://urlgoal.com/2uI6xO



            -

            iFoxSoft Photo Collage Platinum 3.0 [Portable] is a portable version of the software that you can run from any USB drive without installation. This means you can take it with you anywhere and use it on any computer without leaving any traces behind.

            -

            With iFoxSoft Photo Collage Platinum 3.0 [Portable], you can choose from hundreds of templates and backgrounds, or create your own custom layout. You can add photos, text, clipart, frames, masks, and filters to make your collage unique and attractive. You can also adjust the size, position, rotation, and opacity of each element with ease.

            -

            Once you are happy with your collage, you can save it as an image file (JPEG, PNG, BMP, GIF, or TIFF), or print it directly from the software. You can also export it as a web page (HTML), or set it as your desktop wallpaper. If you want to create a CD or DVD cover, you can use the built-in CD label maker that supports various disc types and sizes.

            -

            iFoxSoft Photo Collage Platinum 3.0 [Portable] is a great tool for anyone who wants to unleash their creativity and make beautiful photo collages without hassle. You can download it for free from here and try it out for yourself.

            -

            - -

            One of the best features of iFoxSoft Photo Collage Platinum 3.0 [Portable] is that it allows you to edit your photos within the software. You can crop, rotate, flip, resize, and enhance your photos with various effects and adjustments. You can also apply red-eye removal, color correction, brightness and contrast, sharpening, and blur to your photos.

            -

            Another feature that makes iFoxSoft Photo Collage Platinum 3.0 [Portable] stand out is that it supports drag and drop functionality. You can simply drag and drop your photos from your computer or any external device to the collage area. You can also drag and drop any template, background, or element to change the appearance of your collage.

            -

            iFoxSoft Photo Collage Platinum 3.0 [Portable] is compatible with Windows XP, Vista, 7, 8, and 10. It has a user-friendly interface that is easy to navigate and customize. You can preview your collage in real time and undo or redo any changes you make. You can also zoom in and out of your collage to see the details.

            - -

            In conclusion, iFoxSoft Photo Collage Platinum 3.0 [Portable] is a versatile and convenient collage maker that can help you create amazing photo collages in minutes. You can use it on any computer without installation and enjoy its rich features and functions. Whether you want to make a collage for personal use, or for professional purposes, iFoxSoft Photo Collage Platinum 3.0 [Portable] can meet your needs and expectations. Download it today and start making your own photo collages with ease and fun.

            7b8c122e87
            -
            -
            \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/audiocraft/modules/seanet.py b/spaces/studiobrn/SplitTrack/audiocraft/modules/seanet.py deleted file mode 100644 index 3e5998e9153afb6e68ea410d565e00ea835db248..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/audiocraft/modules/seanet.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import numpy as np -import torch.nn as nn - -from .conv import StreamableConv1d, StreamableConvTranspose1d -from .lstm import StreamableLSTM - - -class SEANetResnetBlock(nn.Module): - """Residual block from SEANet model. - - Args: - dim (int): Dimension of the input/output. - kernel_sizes (list): List of kernel sizes for the convolutions. - dilations (list): List of dilations for the convolutions. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection. - """ - def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1], - activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False, - pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True): - super().__init__() - assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations' - act = getattr(nn, activation) - hidden = dim // compress - block = [] - for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): - in_chs = dim if i == 0 else hidden - out_chs = dim if i == len(kernel_sizes) - 1 else hidden - block += [ - act(**activation_params), - StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation, - norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - self.block = nn.Sequential(*block) - self.shortcut: nn.Module - if true_skip: - self.shortcut = nn.Identity() - else: - self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode) - - def forward(self, x): - return self.shortcut(x) + self.block(x) - - -class SEANetEncoder(nn.Module): - """SEANet encoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of - upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here - that must match the decoder order. We use the decoder order as some models may only employ the decoder. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. 
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. - residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the encoder, it corresponds to the N first blocks. - """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0): - super().__init__() - self.channels = channels - self.dimension = dimension - self.n_filters = n_filters - self.ratios = list(reversed(ratios)) - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." 
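-        # Note: with disable_norm_outer_blocks = N, normalization is disabled for the
-        # first N blocks (the initial conv is block 1, followed by each downsampling
-        # stage in order); all remaining blocks use the configured `norm`.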
- - act = getattr(nn, activation) - mult = 1 - model: tp.List[nn.Module] = [ - StreamableConv1d(channels, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Downsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - norm=block_norm, norm_params=norm_params, - activation=activation, activation_params=activation_params, - causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - # Add downsampling layers - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, mult * n_filters * 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - mult *= 2 - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, dimension, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - self.model = nn.Sequential(*model) - - def forward(self, x): - return self.model(x) - - -class SEANetDecoder(nn.Module): - """SEANet decoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - final_activation (str): Final activation function after all convolutions. - final_activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. - residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple. - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the decoder, it corresponds to the N last blocks. - trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. - If equal to 1.0, it means that all the trimming is done at the right. 
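-            If smaller than 1.0, the trimming is split between the left and right sides of the output.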
- """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0): - super().__init__() - self.dimension = dimension - self.channels = channels - self.n_filters = n_filters - self.ratios = ratios - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." - - act = getattr(nn, activation) - mult = int(2 ** len(self.ratios)) - model: tp.List[nn.Module] = [ - StreamableConv1d(dimension, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - # Upsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm - # Add upsampling layers - model += [ - act(**activation_params), - StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, trim_right_ratio=trim_right_ratio), - ] - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - activation=activation, activation_params=activation_params, - norm=block_norm, norm_params=norm_params, causal=causal, - pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - mult //= 2 - - # Add final layers - model += [ - act(**activation_params), - StreamableConv1d(n_filters, channels, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Add optional final activation to decoder (eg. 
tanh) - if final_activation is not None: - final_act = getattr(nn, final_activation) - final_activation_params = final_activation_params or {} - model += [ - final_act(**final_activation_params) - ] - self.model = nn.Sequential(*model) - - def forward(self, z): - y = self.model(z) - return y diff --git a/spaces/sub314xxl/MetaGPT/metagpt/learn/text_to_image.py b/spaces/sub314xxl/MetaGPT/metagpt/learn/text_to_image.py deleted file mode 100644 index 23c2bddadf66a111b8d7bdcd899ad8118dc7fd72..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/learn/text_to_image.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/8/18 -@Author : mashenquan -@File : text_to_image.py -@Desc : Text-to-Image skill, which provides text-to-image functionality. -""" -import openai.error - -from metagpt.config import CONFIG -from metagpt.const import BASE64_FORMAT -from metagpt.tools.metagpt_text_to_image import oas3_metagpt_text_to_image -from metagpt.tools.openai_text_to_image import oas3_openai_text_to_image -from metagpt.utils.s3 import S3 - - -async def text_to_image(text, size_type: str = "512x512", openai_api_key="", model_url="", **kwargs): - """Text to image - - :param text: The text used for image conversion. - :param openai_api_key: OpenAI API key, For more details, checkout: `https://platform.openai.com/account/api-keys` - :param size_type: If using OPENAI, the available size options are ['256x256', '512x512', '1024x1024'], while for MetaGPT, the options are ['512x512', '512x768']. - :param model_url: MetaGPT model url - :return: The image data is returned in Base64 encoding. - """ - image_declaration = "data:image/png;base64," - if CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL or model_url: - base64_data = await oas3_metagpt_text_to_image(text, size_type, model_url) - elif CONFIG.OPENAI_API_KEY or openai_api_key: - base64_data = await oas3_openai_text_to_image(text, size_type, openai_api_key) - else: - raise openai.error.InvalidRequestError("缺少必要的参数") - - s3 = S3() - url = await s3.cache(data=base64_data, file_ext=".png", format=BASE64_FORMAT) - if url: - return f"![{text}]({url})" - return image_declaration + base64_data if base64_data else "" diff --git a/spaces/suigyu/AItest/template.md b/spaces/suigyu/AItest/template.md deleted file mode 100644 index 219e514f6a85cde5e9da0b99dea439e5ab940df5..0000000000000000000000000000000000000000 --- a/spaces/suigyu/AItest/template.md +++ /dev/null @@ -1,7 +0,0 @@ -### AIアシスタントの返信 - -ここにユーザのメッセージに対する返信を書く - -### AIアシスタントの気持ち - -ここにAIアシスタントの気持ちを書く diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/memmon.py b/spaces/supertori/files/stable-diffusion-webui/modules/memmon.py deleted file mode 100644 index a7060f58523a0cfc2fa9138954c801fcce00ba49..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/memmon.py +++ /dev/null @@ -1,88 +0,0 @@ -import threading -import time -from collections import defaultdict - -import torch - - -class MemUsageMonitor(threading.Thread): - run_flag = None - device = None - disabled = False - opts = None - data = None - - def __init__(self, name, device, opts): - threading.Thread.__init__(self) - self.name = name - self.device = device - self.opts = opts - - self.daemon = True - self.run_flag = threading.Event() - self.data = defaultdict(int) - - try: - torch.cuda.mem_get_info() - torch.cuda.memory_stats(self.device) - except Exception as e: # AMD or whatever - print(f"Warning: caught exception '{e}', memory 
monitor disabled") - self.disabled = True - - def run(self): - if self.disabled: - return - - while True: - self.run_flag.wait() - - torch.cuda.reset_peak_memory_stats() - self.data.clear() - - if self.opts.memmon_poll_rate <= 0: - self.run_flag.clear() - continue - - self.data["min_free"] = torch.cuda.mem_get_info()[0] - - while self.run_flag.is_set(): - free, total = torch.cuda.mem_get_info() # calling with self.device errors, torch bug? - self.data["min_free"] = min(self.data["min_free"], free) - - time.sleep(1 / self.opts.memmon_poll_rate) - - def dump_debug(self): - print(self, 'recorded data:') - for k, v in self.read().items(): - print(k, -(v // -(1024 ** 2))) - - print(self, 'raw torch memory stats:') - tm = torch.cuda.memory_stats(self.device) - for k, v in tm.items(): - if 'bytes' not in k: - continue - print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2))) - - print(torch.cuda.memory_summary()) - - def monitor(self): - self.run_flag.set() - - def read(self): - if not self.disabled: - free, total = torch.cuda.mem_get_info() - self.data["free"] = free - self.data["total"] = total - - torch_stats = torch.cuda.memory_stats(self.device) - self.data["active"] = torch_stats["active.all.current"] - self.data["active_peak"] = torch_stats["active_bytes.all.peak"] - self.data["reserved"] = torch_stats["reserved_bytes.all.current"] - self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] - self.data["system_peak"] = total - self.data["min_free"] - - return self.data - - def stop(self): - self.run_flag.clear() - return self.read() diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Reader Movie Dual Audio Hindi.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Reader Movie Dual Audio Hindi.md deleted file mode 100644 index dcb628f9856f74ae8920ebacd27f4f89ecb3866e..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Reader Movie Dual Audio Hindi.md +++ /dev/null @@ -1,6 +0,0 @@ -

            The Reader Movie Dual Audio Hindi


            DOWNLOAD ✑ ✑ ✑ https://cinurl.com/2uEZcf



            -
            -the reader movie dual audio hindi.. Full Movie Free Download Via Single Links Size 1.4Gb || Torrent Download The post 2012 (2009) BRRip ... 4d29de3e1b
            -
            -
            -

diff --git a/spaces/svjack/bloom-daliy-dialogue-english/app.py b/spaces/svjack/bloom-daliy-dialogue-english/app.py deleted file mode 100644 index d29ed7a361e78b686c1e50954ffab0b7cb237c71..0000000000000000000000000000000000000000 --- a/spaces/svjack/bloom-daliy-dialogue-english/app.py +++ /dev/null @@ -1,43 +0,0 @@ -from predict import * -from transformers import BloomTokenizerFast, BloomForCausalLM - -import os -import gradio as gr - -model_path = "svjack/bloom-daliy-dialogue-english" -tokenizer = BloomTokenizerFast.from_pretrained(model_path) -model = BloomForCausalLM.from_pretrained(model_path) - -obj = Obj(model, tokenizer) - -example_sample = [ - ["This dog is fierce,", 128], - ["Do you like this film?", 64], -] - -def demo_func(prefix, max_length): - max_length = max(int(max_length), 32) - l = obj.predict(prefix, max_length=max_length)[0].split("\n-----\n") - # drop duplicate turns while preserving order - l_ = [] - for ele in l: - if ele not in l_: - l_.append(ele) - l = l_ - assert isinstance(l, list) - return { - "Dialogue Context": l - } - -demo = gr.Interface( - fn=demo_func, - inputs=[gr.Text(label="Prefix"), - gr.Number(label="Max Length", value=128) - ], - outputs="json", - title="Bloom English Daliy Dialogue Generator 🦅🌸 demonstration", - examples=example_sample if example_sample else None, - description = 'This _example_ was **derived** from

            [https://github.com/svjack/Daliy-Dialogue](https://github.com/svjack/Daliy-Dialogue)

            \n', - cache_examples = False - ) - -demo.launch(server_name=None, server_port=None) diff --git a/spaces/syf2023/chatbot/app.py b/spaces/syf2023/chatbot/app.py deleted file mode 100644 index 2713dba90d2791bd7e2a1103ed640aa89401c677..0000000000000000000000000000000000000000 --- a/spaces/syf2023/chatbot/app.py +++ /dev/null @@ -1,60 +0,0 @@ - -import openai -import os - -openai.api_key = os.environ.get("OPENAI_API_KEY") - -class Conversation: - def __init__(self, prompt, num_of_round): - self.prompt = prompt - self.num_of_round = num_of_round - self.messages = [] - self.messages.append({"role": "system", "content": self.prompt}) - - def ask(self, question): - try: - self.messages.append({"role": "user", "content": question}) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=self.messages, - temperature=0.5, - max_tokens=2048, - top_p=1, - ) - except Exception as e: - print(e) - return e - - message = response["choices"][0]["message"]["content"] - self.messages.append({"role": "assistant", "content": message}) - - if len(self.messages) > self.num_of_round*2 + 1: - del self.messages[1:3] # Remove the first round conversation left. - return message - - -import gradio as gr -prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求: -1. 你的回答必须是中文 -2. 回答限制在100个字以内""" - -conv = Conversation(prompt, 10) - -def answer(question, history=[]): - history.append(question) - response = conv.ask(question) - history.append(response) - responses = [(u,b) for u,b in zip(history[::2], history[1::2])] - return responses, history - -with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo: - chatbot = gr.Chatbot(elem_id="chatbot") - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False) - - txt.submit(answer, [txt, state], [chatbot, state]) - -# demo.launch(share=True) -demo.launch() \ No newline at end of file diff --git a/spaces/tabeina/bingo1/src/components/ui/separator.tsx b/spaces/tabeina/bingo1/src/components/ui/separator.tsx deleted file mode 100644 index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/src/components/ui/separator.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SeparatorPrimitive from '@radix-ui/react-separator' - -import { cn } from '@/lib/utils' - -const Separator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, orientation = 'horizontal', decorative = true, ...props }, - ref - ) => ( - - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/tangjicheng123/deepdanbooru/README.md b/spaces/tangjicheng123/deepdanbooru/README.md deleted file mode 100644 index 941a065a5fe34e84000f1ca0b576843b29fcd443..0000000000000000000000000000000000000000 --- a/spaces/tangjicheng123/deepdanbooru/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Deepdanbooru -emoji: 📊 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/README.md b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/README.md deleted file mode 100644 index a8896d1631024e7d87f213686994a7a4ebabc674..0000000000000000000000000000000000000000 --- 
a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Q-Instruct -emoji: 🧑‍🏫 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: true -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Crack KeygenAlias AutoStudio 2019 Activation [VERIFIED].md b/spaces/terfces0erbo/CollegeProjectV2/Crack KeygenAlias AutoStudio 2019 Activation [VERIFIED].md deleted file mode 100644 index 2af3e81ff3cdd29af2fea202b66570c47e424e00..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Crack KeygenAlias AutoStudio 2019 Activation [VERIFIED].md +++ /dev/null @@ -1,95 +0,0 @@ -
            -

            How to Activate Alias Autostudio 2019 with Crack and Keygen

            -

            Alias Autostudio 2019 is a professional software for automotive design and engineering. It allows you to create realistic 3D models, sketches, animations, and simulations of vehicles and their components. But before you can use it, you need to activate it with a valid license key. In this article, we will show you how to activate Alias Autostudio 2019 with crack and keygen in a few simple steps. We will also provide you with some tips and tricks to troubleshoot any issues that may arise during the activation process.

            -

            crack keygenAlias AutoStudio 2019 activation


            DOWNLOADhttps://bytlly.com/2uGjxO



            -

            What You Need to Activate Alias Autostudio 2019

            -

            To activate Alias Autostudio 2019, you need the following:

            -
              -
            • A computer that meets the system requirements for the software.
            • -
            • An internet connection to access the Autodesk Account portal and the product key for Alias Autostudio 2019.
            • -
            • A valid subscription or free trial of Alias Autostudio 2019.
            • -
            • A request code generated by the software during the installation process.
            • -
            • An activation code obtained from the Autodesk Account portal using the request code.
            • -
            • A crack and keygen file downloaded from a reliable source.
            • -
            -

            How to Activate Alias Autostudio 2019 Online

            -

            If you have an internet connection, you can activate Alias Autostudio 2019 online by following these steps:

            -
              -
            1. Download and install Alias Autostudio 2019 from the Autodesk Account portal or from the free trial page.
            2. -
            3. Launch the software and enter your product key and serial number when prompted. You can find them in your Autodesk Account portal or in your email confirmation.
            4. -
            5. Click Activate and follow the instructions on the screen to complete the activation process.
            6. -
            7. If you encounter any errors or issues, refer to the troubleshooting guide for solutions.
            8. -
            -

            How to Activate Alias Autostudio 2019 Offline

            -

            If you do not have an internet connection, you can activate Alias Autostudio 2019 offline by following these steps:

            -

            -
              -
            1. Download and install Alias Autostudio 2019 from a DVD or USB drive.
            2. -
            3. Launch the software and enter your product key and serial number when prompted. You can find them on the DVD case or on a sticker attached to the USB drive.
            4. -
            5. Click Activate and select Request an activation code using an offline method.
            6. -
            7. Click Next and follow the instructions on the screen to generate a request code. Write down or save the request code as you will need it later.
            8. -
            9. Go to a computer that has an internet connection and access the Autodesk Account portal using your Autodesk ID and password.
            10. -
            11. Select Manage Products and Services from the menu and click All Products and Services.
            12. -
            13. Find Alias Autostudio 2019 in the list and click View Downloads.
            14. -
            15. Select Generate Activation Code from the drop-down menu and enter your request code in the field provided.
            16. -
            17. Click Generate Activation Code and copy or save the activation code as you will need it later.
            18. -
            19. Go back to your offline computer and launch Alias Autostudio 2019 again.
            20. -
            21. Enter your activation code when prompted and click Next.
            22. -
            23. Follow the instructions on the screen to complete the activation process.
            24. -
            -

            How to Activate Alias Autostudio 2019 with Crack and Keygen

            -

            If you want to activate Alias Autostudio 2019 with crack and keygen, you need to follow these steps:

            -
              -
            1. Download a crack and keygen file for Alias Autostudio 2019 from a reliable source. Make sure it is compatible with your version of the software and your operating system.
            2. -
            3. Extract the file using a file archiver program such as WinRAR or 7-Zip.
            4. -
            5. Copy the crack file (usually named xf-aliasautostudio.exe) to the installation folder of Alias Autostudio 2019. The default location is C:\Program Files\Autodesk\AliasAutostudio19.0\bin\win64\AliasAutostudio.exe
            6. -
            7. Paste and replace the original file when prompted.
            8. -
            9. Run the keygen file (usually named xf-aliasautostudio.exe) as administrator. Click Patch and wait for it to finish.
            10. -
            11. Click Generate and copy or save the serial number and product key as you will need them later.
            12. -
            13. Launch Alias Autostudio 2019 and enter your serial number and product key when prompted. Click Activate and select I have an activation code from Autodesk.
            14. -
            15. In the keygen window, enter your request code in the field provided. Click Generate Activation Code and copy or save it as you will need it later.
            16. -
            17. In Alias Autostudio 2019, enter your activation code when prompted. Click Next and follow the instructions on the screen to complete the activation process.
            18. - -
            - -

            Congratulations! You have successfully activated Alias Autostudio 2019 with crack and keygen. You can now enjoy all its features and functions without any limitations. However, we recommend that you use this method only for educational purposes. If you like the software, please support its developers by purchasing a legitimate license from Autodesk.

            -

            What are the Benefits of Using Alias Autostudio 2019

            -

            Alias Autostudio 2019 is a comprehensive software for automotive design and engineering. It offers many benefits for users who want to create stunning and realistic vehicles and their components. Some of the benefits are:

            -
              -
            • It has a user-friendly interface that allows you to easily navigate and access various tools and features.
            • -
            • It has a powerful modeling engine that supports NURBS, subdivision surfaces, and polygonal modeling.
            • -
            • It has a sketching tool that lets you create and edit sketches directly on 3D models.
            • -
            • It has an animation tool that enables you to create and edit animations of vehicles and their components.
            • -
            • It has a simulation tool that allows you to test and validate the performance and behavior of vehicles and their components.
            • -
            • It has a rendering tool that helps you produce photorealistic images and videos of your designs.
            • -
            • It has a collaboration tool that allows you to share and review your designs with other users and stakeholders.
            • -
            • It has a customization tool that lets you create and apply your own materials, textures, decals, and lighting effects.
            • -
            -

            What are the Drawbacks of Using Crack and Keygen for Alias Autostudio 2019

            -

            While using crack and keygen for Alias Autostudio 2019 may seem tempting, it also comes with some drawbacks that you should be aware of. Some of the drawbacks are:

            -
              -
            • It is illegal and unethical to use crack and keygen for Alias Autostudio 2019. You are violating the terms and conditions of Autodesk and infringing their intellectual property rights. You may face legal consequences if you are caught using crack and keygen for Alias Autostudio 2019.
            • -
            • It is unsafe and risky to use crack and keygen for Alias Autostudio 2019. You may download malware or viruses along with the crack and keygen files. These may harm your computer or steal your personal information. You may also lose your data or corrupt your files if the crack and keygen files are faulty or incompatible.
            • -
            • It is unreliable and unstable to use crack and keygen for Alias Autostudio 2019. You may experience errors, crashes, or glitches while using the software. You may also miss out on updates, patches, or bug fixes that Autodesk releases for the software. You may also have compatibility issues with other software or hardware that you use.
            • -
            - -

            Therefore, we advise you to use crack and keygen for Alias Autostudio 2019 only for educational purposes. If you like the software, please support its developers by purchasing a legitimate license from Autodesk.



            How to Optimize Your Website for the Keyword "Crack KeygenAlias AutoStudio 2019 Activation"


            If you want to rank your website higher on search engines for the keyword "crack keygenAlias AutoStudio 2019 activation", you need to optimize your website for this keyword. Optimization means making your website relevant, useful, and user-friendly for your target audience and search engines. Here are some tips on how to optimize your website for the keyword "crack keygenAlias AutoStudio 2019 activation":

• Use the keyword in your title tag, meta description, URL, headings, and content (see the sketch after this list). Make sure the keyword is natural and not overused. Avoid keyword stuffing or spamming as this may harm your ranking and reputation.
• Write high-quality and original content that provides value and information to your readers. Avoid copying or spinning content from other sources as this may result in plagiarism and duplicate content penalties.
• Use images, videos, infographics, or other multimedia elements to enhance your content and make it more engaging and appealing. Make sure to use alt text and captions for your images and videos that include the keyword.
• Use internal and external links to connect your content with other relevant and authoritative pages. Internal links help users navigate your website and improve your site structure. External links help users find more information and establish your credibility and trustworthiness.
• Use social media buttons to encourage users to share your content with their networks. Social media signals can help increase your traffic, exposure, and brand awareness.
• Use analytics tools to monitor and measure your website performance and user behavior. Analytics tools can help you identify your strengths and weaknesses, optimize your strategy, and improve your results.
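Whether the keyword actually appears in those on-page slots is easy to check programmatically. Below is a minimal sketch using only Python's standard library; the file name page.html and the exact keyword string are placeholders for illustration, not part of any real toolchain.

```python
from html.parser import HTMLParser

KEYWORD = "crack keygenalias autostudio 2019 activation"  # placeholder target phrase

class KeywordAudit(HTMLParser):
    """Collects the on-page slots where the target keyword appears."""
    def __init__(self):
        super().__init__()
        self.in_title = False
        self.hits = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == "title":
            self.in_title = True
        elif tag == "meta" and attrs.get("name") == "description":
            if KEYWORD in attrs.get("content", "").lower():
                self.hits.append("meta description")
        elif tag == "img":
            if KEYWORD in attrs.get("alt", "").lower():
                self.hits.append("image alt text")

    def handle_endtag(self, tag):
        if tag == "title":
            self.in_title = False

    def handle_data(self, data):
        if self.in_title and KEYWORD in data.lower():
            self.hits.append("title tag")

with open("page.html", encoding="utf-8") as f:  # hypothetical local copy of your page
    parser = KeywordAudit()
    parser.feed(f.read())
print("Keyword found in:", parser.hits or "nowhere - add it to your title, meta, or alt text")
```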

            By following these tips, you can optimize your website for the keyword "crack keygenAlias AutoStudio 2019 activation" and increase your chances of ranking higher on search engines. However, optimization is not a one-time process but a continuous one. You need to keep updating and improving your website according to the latest trends and best practices.


            Conclusion

            -

            In this article, we have shown you how to activate Alias Autostudio 2019 with crack and keygen in a few simple steps. We have also provided you with some tips and tricks to troubleshoot any issues that may arise during the activation process. We have also discussed the benefits and drawbacks of using Alias Autostudio 2019 and crack and keygen for it. Finally, we have given you some tips on how to optimize your website for the keyword "crack keygenAlias AutoStudio 2019 activation". We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

            \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Cracked KCDw Cabinet Maker Software Full Download Free.md b/spaces/terfces0erbo/CollegeProjectV2/Cracked KCDw Cabinet Maker Software Full Download Free.md deleted file mode 100644 index f39fb660d43388ce9b34090633ced5915bf9e24d..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Cracked KCDw Cabinet Maker Software Full Download Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Cracked KCDw Cabinet Maker Software Full Download Free


Download File: https://bytlly.com/2uGlTW



If Kcdw Software Kitchen Cabinets Design or any other file download has a keygen. Feel free to give us a call to see if KCD is a. Multi-tiered...

diff --git a/spaces/texantech/03StreamlitVideoASRNLP/streaming.py b/spaces/texantech/03StreamlitVideoASRNLP/streaming.py deleted file mode 100644 index cc2048269b3e9ac09886471ef9b6dc681db09f25..0000000000000000000000000000000000000000 --- a/spaces/texantech/03StreamlitVideoASRNLP/streaming.py +++ /dev/null @@ -1,66 +0,0 @@
-import subprocess
-
-import numpy as np
-
-
-def ffmpeg_stream(youtube_url, sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200):
-    """
-    Helper function to read an audio file through ffmpeg.
-    """
-    chunk_len = int(sampling_rate * chunk_duration_ms / 1000)
-    pad_len = int(sampling_rate * pad_duration_ms / 1000)
-    read_chunk_len = chunk_len + pad_len * 2
-
-    ar = f"{sampling_rate}"
-    ac = "1"
-    format_for_conversion = "f32le"
-    dtype = np.float32
-    size_of_sample = 4
-
-    ffmpeg_command = [
-        "ffmpeg",
-        "-i",
-        "pipe:",
-        "-ac",
-        ac,
-        "-ar",
-        ar,
-        "-f",
-        format_for_conversion,
-        "-hide_banner",
-        "-loglevel",
-        "quiet",
-        "pipe:1",
-    ]
-
-    ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"]
-
-    try:
-        ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
-        ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin)
-    except FileNotFoundError:
-        raise ValueError("ffmpeg was not found but is required to stream audio files from filename")
-
-    acc = b""
-    leftover = np.zeros((0,), dtype=np.float32)
-    while ytdl_process.poll() is None:
-        buflen = read_chunk_len * size_of_sample
-
-        raw = ffmpeg_process.stdout.read(buflen)
-        if raw == b"":
-            break
-
-        if len(acc) + len(raw) > buflen:
-            acc = raw
-        else:
-            acc += raw
-
-        audio = np.frombuffer(acc, dtype=dtype)
-        audio = np.concatenate([leftover, audio])
-        if len(audio) < pad_len * 2:
-            # TODO: handle end of stream better than this
-            break
-        yield audio
-
-        leftover = audio[-pad_len * 2 :]
-        read_chunk_len = chunk_len
\ No newline at end of file
diff --git a/spaces/thelou1s/MIT-ast-finetuned-audioset-10-10-0.4593/test.py b/spaces/thelou1s/MIT-ast-finetuned-audioset-10-10-0.4593/test.py deleted file mode 100644 index 8712f7a271d2aace46a4de59cf37d424f39a1e40..0000000000000000000000000000000000000000 --- a/spaces/thelou1s/MIT-ast-finetuned-audioset-10-10-0.4593/test.py +++ /dev/null @@ -1,58 +0,0 @@
-import requests
-import sys
-from draw_confusion import draw, draw2
-from tqdm import tqdm
-
-
-DEBUG = False
-API_URL = "https://api-inference.huggingface.co/models/MIT/ast-finetuned-audioset-10-10-0.4593"
-headers = {"Authorization": "Bearer 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"}
-
-
-# Handle a single request
-# filename = '1.flac'
-def request_api(filename):
-    with open(filename, "rb") as f:
-        data = f.read()
-    response = requests.post(API_URL, headers=headers, data=data)
-    return response.json()
-
-
-# Batch processing
-def batch_request_api(file_uris):
-    if DEBUG: print('batch_request_api')
-    y_len = len(file_uris)
-    y_true = [0] * y_len
-    y_pred = [0] * y_len
-    y_idx = 0
-    for input_file in tqdm(file_uris):
-        res = request_api(input_file)
-        # print('%s %s:' % (str(y_idx), str(input_file)) )
-        # print('%s' % str(res[:3]))
-
-        first_label = str(res[0]['label'])
-        first_score = res[0]['score']
-        # print(str(first_label))
-        # print(str(first_score))
-
-        y_true[y_idx] = first_label
-        y_pred[y_idx] = round(first_score, 1)
-        y_idx = y_idx + 1
-
-    return y_true, y_pred
-
-
-# Command-line entry point
-if __name__ == "__main__":
-    if DEBUG: print('main, ' + str(sys.argv[1:]))
-    if DEBUG: print('main, ' + str(len(sys.argv)))
-
-    # Get the command-line arguments
-    if len(sys.argv) < 2:
-        print("Usage: python x.py <file or glob pattern>")
-        sys.exit(1)
-
-    if DEBUG: print('main, batch_request_api')
-    y_true, y_pred = batch_request_api(sys.argv[1:])
-    if DEBUG: print('y_true = %s, y_pred = %s' % (str(y_true), str(y_pred)))
-    draw2(y_true, y_pred)
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved APK - The Ultimate Dino-Adventure for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved APK - The Ultimate Dino-Adventure for Android.md deleted file mode 100644 index 38ca47a03f7a4fcf301133467a178c4fb35866dd..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved APK - The Ultimate Dino-Adventure for Android.md +++ /dev/null @@ -1,160 +0,0 @@

            How to Download ARK: Survival Evolved APK for Android


            If you are a fan of dinosaurs, survival, and adventure, you might want to try out ARK: Survival Evolved, a popular game that lets you explore, craft, and tame over 80 different creatures in a massive open world. In this article, we will show you how to download and install ARK: Survival Evolved APK for Android, as well as how to play it on your mobile device.


            download ark survival evolved apk android


            DOWNLOAD ✔✔✔ https://bltlly.com/2uOltG




            What is ARK: Survival Evolved?


            A brief introduction to the game and its features


            ARK: Survival Evolved is a game that challenges you to survive and thrive on a mysterious island, where you start out alone and unarmed. You will need to gather resources, craft tools, build shelters, and hunt for food. You will also encounter many dinosaurs and other primal creatures that you can capture and tame, making them your allies or mounts. You can also meet up with other players and form tribes, or compete with them for resources and territory.


            The game offers a rich and immersive experience, with stunning graphics, realistic physics, dynamic weather, day-night cycle, and procedurally generated maps. You can customize your character, your base, and your weapons with various skins and items. You can also access a variety of game modes, such as single-player, multiplayer, PvP, PvE, hardcore, creative, and more.


            The difference between the mobile and the PC/console versions


            ARK: Survival Evolved was originally released for PC and consoles in 2017, but it was later adapted for mobile devices in 2018. The mobile version is free-to-play, but it has some differences from the PC/console version. For example:



• The mobile version has fewer dinosaurs and creatures than the PC/console version.
• The mobile version has smaller maps than the PC/console version.
• The mobile version has lower graphics quality than the PC/console version.
• The mobile version has some exclusive features, such as an offline mode, a primal pass subscription, an ancient amber currency, and more.

            Despite these differences, the mobile version still offers a fun and engaging gameplay that is similar to the PC/console version.


            How to Download and Install ARK: Survival Evolved APK for Android


            The official way from Google Play Store


            The easiest and safest way to download and install ARK: Survival Evolved APK for Android is from the Google Play Store. You can simply follow these steps:

1. Open the Google Play Store app on your Android device.
2. Search for "ARK: Survival Evolved" in the search bar.
3. Select the game from the list of results and tap on "Install".
4. Wait for the download and installation process to complete.
5. Launch the game from your app drawer or home screen.

The alternative way from APKCombo


              If you cannot access the Google Play Store for some reason, or if you want to download an older or newer version of ARK: Survival Evolved APK for Android, you can use an alternative source, such as APKCombo. APKCombo is a website that provides APK files for various Android apps and games, including ARK: Survival Evolved. You can use this method to download and install ARK: Survival Evolved APK for Android:

1. Open your web browser and go to https://apkcombo.com/.
2. Search for "ARK: Survival Evolved" in the search bar.
3. Select the game from the list of results and choose the version that you want to download.
4. Tap on "Download APK" and wait for the file to be downloaded to your device.
5. Before installing the APK file, you need to enable the "Unknown sources" option in your device settings. This will allow you to install apps from sources other than the Google Play Store.
6. Locate the downloaded APK file in your file manager and tap on it to install it. If the site publishes a checksum for the file, compare it first (see the sketch after this list).
7. Launch the game from your app drawer or home screen.
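Because sideloaded files are a common malware vector, it is worth verifying a download before installing it. Here is a minimal Python sketch that compares a file's SHA-256 digest against a published value; the file name and the expected digest are placeholders for illustration, not real values from any site.

```python
import hashlib

APK_PATH = "ark-survival-evolved.apk"  # hypothetical downloaded file
EXPECTED_SHA256 = "0123abcd..."        # placeholder: use the digest the site publishes

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MiB chunks so large APKs do not need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(APK_PATH)
if actual == EXPECTED_SHA256:
    print("Checksum matches - the file is the one the site published.")
else:
    print("Checksum mismatch!\n expected:", EXPECTED_SHA256, "\n got:     ", actual)
```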

              The steps to download and install the APK file


              The following table summarizes the steps to download and install ARK: Survival Evolved APK for Android from APKCombo:

| Step | Action |
| --- | --- |
| 1 | Open your web browser and go to https://apkcombo.com/ |
| 2 | Search for "ARK: Survival Evolved" in the search bar |
| 3 | Select the game and choose the version that you want to download |
| 4 | Tap on "Download APK" and wait for the file to be saved to your device |
| 5 | Enable the "Unknown sources" option in your device settings |
| 6 | Locate the downloaded APK file in your file manager and tap on it to install it |
| 7 | Launch the game from your app drawer or home screen |

              The advantages and disadvantages of using APK files


              Using APK files to download and install ARK: Survival Evolved APK for Android has some advantages and disadvantages. Here are some of them:

• Advantages:
• You can access apps and games that are not available in your region or device.
• You can update or downgrade to any version of the app or game that you want.
• You can save bandwidth and storage space by downloading only the APK file instead of the whole app or game.
• Disadvantages:
• You may expose your device to malware or viruses if you download from untrusted sources.
• You may encounter compatibility or performance issues if you install an incompatible or unstable version of the app or game.
• You may violate the terms and conditions of the app or game developer if you use an unauthorized or modified version of the app or game.

              How to Play ARK: Survival Evolved on Android


              The basic gameplay and controls


              Once you have downloaded and installed ARK: Survival Evolved APK for Android, you can start playing it on your device. The game will ask you to create a character and choose a server to join. You can also customize your settings, such as graphics, sound, controls, and more.


              The game will then spawn you on a random location on the island, where you will have to survive by gathering resources, crafting tools, building shelters, and hunting for food. You will also have to deal with various threats, such as dinosaurs, other players, weather, hunger, thirst, and more.


              The game controls are simple and intuitive. You can use the virtual joystick on the left side of the screen to move your character, and swipe on the right side of the screen to look around. You can also use various buttons on the screen to perform actions, such as jumping, crouching, attacking, interacting, crafting, inventory, map, chat, and more. You can also customize your controls in the settings menu.


              The tips and tricks to survive and thrive in the game


              ARK: Survival Evolved is a challenging game that requires strategy, skill, and patience. Here are some tips and tricks that can help you survive and thrive in the game:

• Gather as many resources as you can, especially wood, stone, fiber, and berries. You will need them to craft essential items, such as weapons, armor, tools, and structures.
• Level up your character and your tamed creatures by gaining experience from various activities, such as harvesting, crafting, killing, and exploring. You can also use the ancient amber currency to buy XP boosters. You can spend your level-up points on improving your attributes, such as health, stamina, weight, melee damage, and more. You can also unlock new engrams, which are blueprints for crafting items.
• Tame as many dinosaurs and creatures as you can, as they will help you in various ways, such as fighting, gathering, transporting, and more. You can tame them by knocking them out with tranquilizer arrows or darts, and then feeding them their preferred food. You can also use soothing balm to speed up the taming process. You can access your tamed creatures' inventory, stats, and commands by tapping on their nameplate.
• Build a base to protect yourself and your belongings from the elements and enemies. You can use various materials, such as wood, stone, metal, and more to build walls, floors, ceilings, doors, windows, and more. You can also craft furniture, appliances, decorations, and more to make your base more comfortable and functional. You can also place turrets, traps, and spikes to defend your base from intruders.
• Join a tribe or create your own to cooperate with other players and share resources, items, and creatures. You can also chat with your tribe members and allies using the in-game chat system. You can also engage in PvP or PvE battles with other tribes or creatures to claim territory and loot.

              Conclusion


              ARK: Survival Evolved is a game that offers a unique and thrilling experience of surviving in a prehistoric world full of dinosaurs and other dangers. You can download and install ARK: Survival Evolved APK for Android from the Google Play Store or from APKCombo. You can also play the game on your mobile device using the simple and intuitive controls. You can also follow the tips and tricks that we have shared in this article to survive and thrive in the game.


              If you are ready to embark on this epic adventure, download ARK: Survival Evolved APK for Android today and start playing!


              FAQs


Q1. Is ARK: Survival Evolved free to play on Android?

A1. Yes, ARK: Survival Evolved is free to play on Android devices. However, the game contains some in-app purchases that can enhance your gameplay or unlock some exclusive features.

Q2. How much storage space does ARK: Survival Evolved require on Android?

A2. ARK: Survival Evolved requires about 2 GB of storage space on Android devices. However, this may vary depending on the version of the game and the device model.

Q3. Can I play ARK: Survival Evolved with my friends on Android?

A3. Yes, you can play ARK: Survival Evolved with your friends on Android devices. You can join or create a server that supports multiplayer mode and invite your friends to join you. You can also join or create a tribe with your friends and cooperate or compete with other players.

Q4. Is ARK: Survival Evolved compatible with my Android device?

A4. ARK: Survival Evolved is compatible with most Android devices that run on Android 7.0 or higher and have at least 3 GB of RAM. However, some devices may not support the game due to hardware limitations or compatibility issues.

Q5. How can I contact the developers of ARK: Survival Evolved for support or feedback?

A5. You can contact the developers of ARK: Survival Evolved for support or feedback through the game's official website and social media channels.

              \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download House Party Game APK for Free and Experience the Craziest Night of Your Life.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download House Party Game APK for Free and Experience the Craziest Night of Your Life.md deleted file mode 100644 index f903e93cc0cfe0092584260548be774aff630386..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download House Party Game APK for Free and Experience the Craziest Night of Your Life.md +++ /dev/null @@ -1,99 +0,0 @@ -

              How to Download House Party Game APK for Android


              Do you want to experience a college-charged, 3D adventure game that is about player choice and partying your way? If yes, then you should try House Party Game, a hilarious and interactive game that lets you explore a house full of fun, stories, and secrets. In this article, we will show you what House Party Game is, what features it has, how to download House Party Game APK for Android, and some tips and tricks for playing it.


              house party game apk download


              Download >>> https://bltlly.com/2uOmyP




              What is House Party Game?


              House Party Game is a game developed by Rise Studios that was released in 2020. It is a game that simulates a house party where you can interact with various characters, make choices that affect the outcome of the story, and have fun in different ways. The game has a lot of humor, adult content, and replayability. You can choose from over 25 story opportunities to discover and complete, each telling a part of one of the many fun-filled stories that can play out. You can also find over 50 items to interact with and change the environment around you. The game features 15 of the most interesting, reactive, and hilarious NPCs you'll find in any game with the name "House" in the title. You can form friendships and relationships with these characters as you progress through the night.


              Features of House Party Game


              House Party Game has many features that make it an enjoyable and unique game. Here are some of them:


              Branching narratives


              House Party Game has intricate, branching narratives that depend on your choices and actions. Every choice you make will have a reaction in your playthrough. Sometimes simple things can affect your party in the most magnificent of ways. There is no true ending but dozens of ways to finish, so the story is what you make it.


              Interactive items


              House Party Game has over 50 items to interact with and change the environment around you. You can use these items to create chaos, solve puzzles, or impress others. Some of these items are very ordinary, like an mp3 player or a bottle of beer. Some are very insane, like a dude-bro AI or a talking wall-mounted fish.


              Diverse characters


House Party Game has 15 of the most diverse characters that you can interact with and get to know. Each character has their own personality, backstory, and goals. You can befriend them, romance them, or annoy them. You can also influence their relationships with each other and see how they react to different situations. The characters are fully voiced and animated, making them more realistic and immersive.




              How to download House Party Game APK for Android


              If you want to play House Party Game on your Android device, you will need to download the APK file from a trusted source. APK stands for Android Package Kit, and it is a file format that contains all the elements needed to install an app on your device. However, since House Party Game is not available on the Google Play Store, you will need to enable unknown sources on your device before installing the APK file. Here are the steps to do that:
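Since an APK is just a ZIP archive under the hood, you can peek inside one before installing it. The sketch below uses only Python's standard library; the file name is a placeholder, not a real release file.

```python
import zipfile

APK_PATH = "house_party.apk"  # hypothetical file name

# An APK is a ZIP archive, so zipfile can list its contents directly.
with zipfile.ZipFile(APK_PATH) as apk:
    for name in apk.namelist()[:10]:
        print(name)  # e.g. AndroidManifest.xml, classes.dex, resources.arsc
```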


              Step 1: Enable unknown sources


              Go to your device's settings and look for the security or privacy option. Tap on it and find the option that says "Allow installation of apps from unknown sources" or something similar. Toggle it on and confirm your choice.


              Step 2: Download APK file


              Go to a reliable website that offers House Party Game APK for Android, such as [APKPure] or [APKMirror]. Make sure you download the latest version of the game, which is 0.19.5 as of June 2023. Tap on the download button and wait for the file to be saved on your device.


              Step 3: Install APK file


              Once the download is complete, locate the APK file on your device using a file manager app or your notification bar. Tap on the file and follow the instructions to install it. You may need to grant some permissions to the app during the installation process.


              Step 4: Launch House Party Game


              After the installation is done, you can find House Party Game on your app drawer or home screen. Tap on the icon and enjoy the game!


              Tips and tricks for playing House Party Game


              House Party Game is a game that requires you to explore, interact, and make choices. It can be challenging and confusing at times, especially if you are new to the game. Here are some tips and tricks that can help you have a better experience:


              Explore the house


              The house is full of secrets, surprises, and opportunities. You should explore every room and every corner of the house to find items, clues, and story events. You never know what you might discover or trigger by exploring.


              Talk to everyone


              The characters are the heart of House Party Game. They have their own stories, personalities, and preferences. You should talk to everyone and learn more about them. You can also compliment them, flirt with them, insult them, or prank them. Your interactions will affect how they feel about you and how they behave around you.


              Save your progress


              House Party Game has a save system that allows you to save your progress at any point in the game. You can also load your previous saves if you want to try different choices or outcomes. You should save often and use multiple slots to avoid losing your progress or getting stuck.


              Use items wisely


              Items are very useful in House Party Game. They can help you solve puzzles, unlock doors, impress others, or create chaos. You should use items wisely and experiment with different combinations. Some items have multiple uses or effects depending on how you use them.


              Conclusion


              House Party Game is a fun and interactive game that lets you experience a house party like never before. You can explore a house full of stories, secrets, and fun. You can interact with diverse characters and make choices that affect the outcome of the game. You can also download House Party Game APK for Android from trusted sources and enjoy the game on your device.


              Frequently Asked Questions


              Here are some frequently asked questions about House Party Game:

1. Is House Party Game free?

Yes, House Party Game is free to play on PC and Android devices. However, you can support the developers by purchasing DLCs or donating via Patreon.

2. Is House Party Game safe?

Yes, House Party Game is safe to play as long as you download it from trusted sources and enable unknown sources on your device. However, House Party Game contains adult content and humor that may not be suitable for everyone.

3. How long is House Party Game?

House Party Game has no fixed length, as it depends on your choices and actions. However, you can expect to spend at least a few hours to complete all the story opportunities and explore all the possibilities.

4. Can I play House Party Game with friends?

House Party Game is currently a single-player game, but the developers have plans to add multiplayer features in the future. You can follow their updates on their website or social media platforms.

5. Can I customize my character in House Party Game?

Yes, you can customize your character's appearance, name, and gender in House Party Game. You can also choose from different outfits and accessories to suit your style.

              I hope you enjoyed this article and learned something new. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and have a great day!

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Bulk Image Downloader 4.91 Multilingual Patch [CrackingPatching] TOP.md b/spaces/tioseFevbu/cartoon-converter/scripts/Bulk Image Downloader 4.91 Multilingual Patch [CrackingPatching] TOP.md deleted file mode 100644 index 7d74cc0f88f750fddbfd63a88a9c2137f54ed4dd..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Bulk Image Downloader 4.91 Multilingual Patch [CrackingPatching] TOP.md +++ /dev/null @@ -1,13 +0,0 @@ - -

              How to Download Multiple Images from Any Website with Bulk Image Downloader 4.91 Multilingual Patch


              If you are looking for a fast and easy way to download multiple images from any website, you might want to try Bulk Image Downloader 4.91 Multilingual Patch. This is a powerful software that can download images from almost any web page, including galleries, forums, blogs, social media, and more. You can also download images from password-protected sites, embedded videos, and image hosts like Flickr, Imgur, and Picasa.


              Bulk Image Downloader 4.91 Multilingual Patch supports many file formats, such as JPG, PNG, GIF, BMP, TIFF, and more. You can also customize the file names, save locations, and image sizes according to your preferences. You can also filter out unwanted images by size, type, or resolution. Bulk Image Downloader 4.91 Multilingual Patch can also resume interrupted downloads and handle redirections and pop-ups.


              Bulk Image Downloader 4.91 Multilingual Patch [CrackingPatching]


DOWNLOAD: https://urlcod.com/2uHwTY




              To use Bulk Image Downloader 4.91 Multilingual Patch, you need to download it from the official website or from CrackingPatching.com. Then, you need to install it on your computer and apply the patch file to activate the full version. After that, you can launch the software and copy the URL of the web page that contains the images you want to download. Then, paste it into the software and click on the Download button. The software will scan the web page and display all the images that can be downloaded. You can select or deselect the images you want and click on the Start button to begin the download process.
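To make the scan-and-download workflow concrete, here is a minimal Python sketch of the same idea: fetch a page, collect its img URLs, and save each one. It uses only the standard library, is unrelated to Bulk Image Downloader's actual code, and the page URL and output folder are placeholders.

```python
import os
import urllib.request
from html.parser import HTMLParser
from urllib.parse import urljoin

PAGE_URL = "https://example.com/gallery"  # placeholder page to scan
OUT_DIR = "downloads"                     # placeholder output folder

class ImgCollector(HTMLParser):
    """Collects the src attribute of every <img> tag on the page."""
    def __init__(self):
        super().__init__()
        self.srcs = []

    def handle_starttag(self, tag, attrs):
        if tag == "img":
            src = dict(attrs).get("src")
            if src:
                self.srcs.append(urljoin(PAGE_URL, src))

html = urllib.request.urlopen(PAGE_URL).read().decode("utf-8", errors="replace")
collector = ImgCollector()
collector.feed(html)

os.makedirs(OUT_DIR, exist_ok=True)
for url in collector.srcs:
    name = url.rsplit("/", 1)[-1] or "image"
    urllib.request.urlretrieve(url, os.path.join(OUT_DIR, name))
    print("saved", name)
```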


              Bulk Image Downloader 4.91 Multilingual Patch is a handy tool that can save you time and bandwidth when downloading multiple images from any website. It is also compatible with most browsers and can integrate with them for easier access. You can also use it to download full-sized images from thumbnails or links. Bulk Image Downloader 4.91 Multilingual Patch is a must-have software for anyone who loves collecting images from the web.


              One of the best features of Bulk Image Downloader 4.91 Multilingual Patch is that it can download images from multiple tabs or windows at once. You can also queue up multiple downloads and let the software run in the background while you browse the web. You can also pause and resume the downloads at any time. Bulk Image Downloader 4.91 Multilingual Patch can also automatically detect and download images from multi-page galleries and slideshows.


              Another great feature of Bulk Image Downloader 4.91 Multilingual Patch is that it can download images from social media sites like Facebook, Twitter, Instagram, Pinterest, and more. You can also download images from your friends' albums or pages, as well as from public profiles and groups. You can also download images from private messages or chats. Bulk Image Downloader 4.91 Multilingual Patch can also download images from other popular sites like Reddit, Tumblr, DeviantArt, 4chan, and more.


Bulk Image Downloader 4.91 Multilingual Patch is not only software for downloading images, but also a tool for managing and organizing them. You can create folders and subfolders for your downloaded images and sort them by name, date, size, or type. You can also view your downloaded images in a thumbnail gallery or a full-screen slideshow. You can also edit, rename, delete, or move your downloaded images as you wish.

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Cozy Beats Lofi Hip Hop WAV.md b/spaces/tioseFevbu/cartoon-converter/scripts/Cozy Beats Lofi Hip Hop WAV.md deleted file mode 100644 index 5bc8e7e9ff1b0eb105aff36b5bec95fb9815d00d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Cozy Beats Lofi Hip Hop WAV.md +++ /dev/null @@ -1,17 +0,0 @@ - -

              Cozy Beats Lofi Hip Hop WAV: How to Create Relaxing Music for Study and Chill


              Lofi hip hop is a genre of music that combines elements of hip hop, jazz, soul, and ambient music to create a soothing and relaxing sound. Lofi hip hop is often used as background music for studying, working, or relaxing, as it helps to create a cozy and calm atmosphere. Lofi hip hop is also known for its characteristic low-fidelity sound quality, which adds a nostalgic and vintage feel to the music.


              Cozy Beats Lofi Hip Hop WAV


              Download File >>> https://urlcod.com/2uHxJp




              If you want to create your own lofi hip hop beats, you will need some basic tools and skills. In this article, we will show you how to use Cozy Beats Lofi Hip Hop WAV, a sample pack from Apollo Sound that contains 12 smooth and mellow lofi instrumentals, to create your own cozy chill-fi beats. Cozy Beats Lofi Hip Hop WAV is a collection of high-quality wav loops, drum hits, midi files, and live instruments that you can use to create your own lofi hip hop tracks in any DAW or sampler. Here are some steps to follow:

1. Choose a composition or a song starter from the sample pack. Each composition contains a full mix and stem bounces of different elements, such as drums, bass, sax, guitar, keys, synths, foleys, and effects. Each song starter contains a full mix and individual loops of each element. You can use the full mix as a reference or inspiration, or you can use the individual loops to create your own arrangement.
2. Drag and drop the loops into your DAW or sampler. You can adjust the tempo and key of the loops to fit your project. You can also mix and match different loops from different compositions or song starters to create your own unique combination (a programmatic sketch of this kind of layering follows this list).
3. Add some drum hits from the sample pack to create your own drum patterns. Cozy Beats Lofi Hip Hop WAV contains 100 drum hits, including kicks, snares, claps, hats, cymbals, and percussions. You can use a drum machine or a sampler to load the drum hits and program your own beats. You can also use some effects, such as compression, EQ, reverb, delay, distortion, or vinyl crackle, to add some lofi flavor to your drums.
4. Add some midi files from the sample pack to create your own melodies or chords. Cozy Beats Lofi Hip Hop WAV contains 23 midi files that correspond to some of the loops in the sample pack. You can use a midi keyboard or a software instrument to play the midi files and tweak them to your liking. You can also use some effects, such as chorus, flanger, phaser, or filter, to add some movement and variation to your melodies or chords.
5. Add some live instruments from the sample pack to add some organic and realistic touch to your track. Cozy Beats Lofi Hip Hop WAV contains live recordings of saxophone, trumpet, guitar, bass guitar, piano, electric piano, organ, and synth. You can use these live instruments as they are or you can chop them up and rearrange them to create your own samples.
6. Mix and master your track. Once you have all the elements in place, you can adjust the volume levels, panning positions, EQ settings, compression settings, reverb settings, delay settings, and other effects settings of each element to balance your mix and make it sound cohesive and harmonious. You can also use some mastering tools, such as limiter, maximizer, multiband compressor, stereo enhancer, or exciter, to polish your track and make it sound loud and clear.
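The loop-layering in step 2 can also be illustrated outside a DAW. Below is a minimal Python sketch that sums two 16-bit WAV loops into one file using only the standard library; the file names are placeholders, and a real session would also handle tempo-matching and resampling, which a DAW does for you.

```python
import wave
import array

# Hypothetical file names: any two 16-bit PCM WAV loops with the same
# sample rate and channel count will do.
LOOP_A = "drums_80bpm.wav"
LOOP_B = "keys_80bpm.wav"

def read_samples(path):
    with wave.open(path, "rb") as w:
        params = w.getparams()
        assert params.sampwidth == 2, "sketch assumes 16-bit PCM"
        return params, array.array("h", w.readframes(params.nframes))

params_a, a = read_samples(LOOP_A)
params_b, b = read_samples(LOOP_B)

# Sum the two loops sample by sample, clamping to the 16-bit range
# so loud sections do not wrap around and distort.
n = min(len(a), len(b))
mixed = array.array("h", (max(-32768, min(32767, a[i] + b[i])) for i in range(n)))

with wave.open("layered_loop.wav", "wb") as out:
    out.setnchannels(params_a.nchannels)
    out.setsampwidth(2)
    out.setframerate(params_a.framerate)
    out.writeframes(mixed.tobytes())
```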

              Congratulations! You have just created your own cozy chill-fi beat using Cozy Beats Lofi Hip Hop WAV. You can now export your track as a wav file and share it with your friends or upload it online. You can also use Cozy Beats Lofi Hip Hop WAV to create more lofi hip hop tracks with different vibes and moods. The possibilities are endless!


Cozy Beats Lofi Hip Hop WAV is available for purchase at Loopmasters for $29.95 USD. It is 100% royalty free for use in your productions. If you are looking for more inspiration in lo

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download Arcsoft Totalmedia 3.5 Full Cracked [BETTER].md b/spaces/tioseFevbu/cartoon-converter/scripts/Download Arcsoft Totalmedia 3.5 Full Cracked [BETTER].md deleted file mode 100644 index 9b9c3aec302b09d4050857cb5d703fb3b320e908..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download Arcsoft Totalmedia 3.5 Full Cracked [BETTER].md +++ /dev/null @@ -1,22 +0,0 @@ - -

              How to Download Arcsoft Totalmedia 3.5 Full Cracked for Free


              Arcsoft Totalmedia 3.5 is a multimedia software that allows you to watch, record, edit, and enjoy TV shows, movies, music, photos, and videos on your PC. It supports various TV standards, such as ISDB-T, which is mainly used in Brazil[^1^]. It also has features like video editing, slideshow creation, DVD burning, and media conversion.

              -

              Download Arcsoft Totalmedia 3.5 Full Cracked


Download Zip: https://urlcod.com/2uHvJV




              If you want to download Arcsoft Totalmedia 3.5 full cracked for free, you might be tempted to look for torrent sites or file-sharing platforms that offer illegal copies of the software. However, this is not a good idea, as you might expose your computer to viruses, malware, spyware, or other threats that could harm your system or compromise your privacy. Moreover, you might face legal consequences for violating the copyright laws and the terms of service of the software.


              Fortunately, there is a better and safer way to download Arcsoft Totalmedia 3.5 full cracked for free. You can use the Internet Archive, which is a non-profit digital library that preserves and provides access to millions of free books, movies, software, music, websites, and more. The Internet Archive has a copy of Arcsoft Totalmedia 3.5 that you can download and use without any restrictions or risks.


              To download Arcsoft Totalmedia 3.5 full cracked for free from the Internet Archive, follow these steps:


1. Go to https://archive.org/details/ArcSoft_TotalMedia_3.5.23.341 [^2^]. This is the page where you can find the software.
2. Click on the "DOWNLOAD OPTIONS" section on the right side of the page. You will see a list of files that you can download.
3. Click on the file named "ArcSoft_TotalMedia_3.5.23.341.zip". This is the compressed file that contains the software.
4. Save the file to your computer and wait for it to finish downloading (a scripted version of this step and the next is sketched after this list).
5. Extract the file using a program like WinZip or WinRAR.
6. Open the extracted folder and double-click on the file named "setup.exe". This will launch the installation wizard.
7. Follow the instructions on the screen to install the software.
8. Enjoy using Arcsoft Totalmedia 3.5 full cracked for free!
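Steps 4 and 5 can also be scripted. The sketch below fetches the archive with Python's standard library and unpacks it; the https://archive.org/download/... URL follows the Internet Archive's usual item/file layout, but treat the exact path as an assumption and fall back to the manual steps if it differs.

```python
import urllib.request
import zipfile

# Assumed download URL, derived from the item page above; verify it in your
# browser first (archive.org serves files under /download/<item>/<file>).
URL = "https://archive.org/download/ArcSoft_TotalMedia_3.5.23.341/ArcSoft_TotalMedia_3.5.23.341.zip"
ZIP_PATH = "ArcSoft_TotalMedia_3.5.23.341.zip"

print("Downloading", URL)
urllib.request.urlretrieve(URL, ZIP_PATH)

with zipfile.ZipFile(ZIP_PATH) as zf:
    zf.extractall("ArcSoft_TotalMedia_3.5.23.341")  # same as "extract the file" in step 5
print("Extracted - now run setup.exe from the extracted folder.")
```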

              Note: You might need to disable your antivirus or firewall temporarily during the installation process, as some programs might detect the software as a potential threat.


              We hope this article helped you download Arcsoft Totalmedia 3.5 full cracked for free from the Internet Archive. If you have any questions or feedback, please leave a comment below.

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/FSX Steam Edition Air Hauler 2 Add-On Download] [Crack Serial Key WORK.md b/spaces/tioseFevbu/cartoon-converter/scripts/FSX Steam Edition Air Hauler 2 Add-On Download] [Crack Serial Key WORK.md deleted file mode 100644 index 4204d12e9688aaff5244e2ffeb658a936083822a..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/FSX Steam Edition Air Hauler 2 Add-On Download] [Crack Serial Key WORK.md +++ /dev/null @@ -1,18 +0,0 @@ - -

              FSX Steam Edition: Air Hauler 2 Add-On Review


              If you are looking for a realistic and immersive way to manage your own aviation business in FSX Steam Edition, you might want to check out the Air Hauler 2 Add-On. This add-on lets you create and run your own freight and passenger company, with a lot of features and options to customize your operation.


              Air Hauler 2 is a sequel to the popular Air Hauler add-on, which was released in 2009. Air Hauler 2 improves on the original with a new user interface, more aircraft types, dynamic markets, real-world weather, multiplayer support, and much more. You can start your company from scratch or take over an existing one, and choose from different modes of play, such as career, sandbox, or online.


              FSX Steam Edition: Air Hauler 2 Add-On Download] [Crack Serial Key


              Download Zip === https://urlcod.com/2uHxs0



              -

              As the owner of your company, you will have to deal with various aspects of running an aviation business, such as hiring pilots, buying or leasing aircraft, maintaining your fleet, planning routes, setting fares, managing finances, and expanding your network. You can also fly any of the missions yourself, using either the default FSX aircraft or any add-on aircraft you have installed. You can also hire other players to fly for you online, or join another player's company as a pilot.

              -

              Air Hauler 2 is a very detailed and realistic add-on that will appeal to fans of simulation and management games. It adds a lot of depth and challenge to FSX Steam Edition, and gives you a lot of freedom and flexibility to run your company as you wish. If you are looking for a new way to enjoy FSX Steam Edition, you should definitely give Air Hauler 2 a try.

              Some of the features that make Air Hauler 2 a great add-on for FSX Steam Edition are:

• Cargo jobs – you can choose from a variety of cargo missions, from simple deliveries to complex logistics, and get paid according to the distance, weight, and urgency of the cargo. You can also access an online global shared job board, where you can find more jobs from other Air Hauler 2 users or post your own jobs for them to fly.
• Passenger operations – you can fly single passenger jobs between any airport you choose, or set up a schedule and routes for your airline to fly. You can set your own fares and adjust them according to demand and competition. You can also fly special humanitarian missions, such as evacuating refugees or delivering aid.
• Dynamic markets – the prices of commodities and fuel vary according to supply and demand, and you can take advantage of this by trading commodities for delivery on your cargo routes. You can also open manufacturing plants and factories at your bases to create parts for manufacture into more precious commodities.
• Real-world weather – you can choose to fly with real-world weather conditions, which will affect your flight performance and fuel consumption. You can also check the weather forecast before you plan your route and adjust it accordingly.
• Multiplayer support – described in the next paragraph.

                Another feature that makes Air Hauler 2 a great add-on for FSX Steam Edition is multiplayer support. You can either create your own virtual airline and recruit other Air Hauler 2 pilots to fly for you, or join another pilot's virtual airline and fly for them. You can select jobs from a global shared job board, where you can find more opportunities and challenges. You can also communicate with other pilots using the built-in chat system, and view their live locations on the map.


                Multiplayer mode adds a lot of fun and realism to Air Hauler 2, as you can cooperate or compete with other pilots around the world. You can also learn from each other and share tips and tricks. You can also fly together in formation or escort each other on dangerous missions. Multiplayer mode is optional, so you can still enjoy Air Hauler 2 in single-player mode if you prefer.


                Air Hauler 2 is a must-have add-on for FSX Steam Edition users who want to experience the thrill and challenge of running their own aviation business. It offers a lot of features and options to customize your company and your flights, and it integrates seamlessly with FSX Steam Edition. It also supports any add-on aircraft you have installed, so you can fly your favorite planes in Air Hauler 2. Whether you want to fly cargo or passengers, solo or online, Air Hauler 2 will give you hours of enjoyment and satisfaction.

\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py deleted file mode 100644 index a12e2c75d132c73b556702159d535d15ed9abfd2..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py +++ /dev/null @@ -1,104 +0,0 @@
-import os
-import pathlib
-import tempfile
-import functools
-import contextlib
-import types
-import importlib
-
-from typing import Union, Optional
-from .abc import ResourceReader, Traversable
-
-from ._compat import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
-    # type: (Package) -> Traversable
-    """
-    Get a Traversable resource from a package
-    """
-    return from_package(get_package(package))
-
-
-def get_resource_reader(package):
-    # type: (types.ModuleType) -> Optional[ResourceReader]
-    """
-    Return the package's loader if it's a ResourceReader.
-    """
-    # We can't use
-    # a issubclass() check here because apparently abc.'s __subclasscheck__()
-    # hook wants to create a weak reference to the object, but
-    # zipimport.zipimporter does not support weak references, resulting in a
-    # TypeError. That seems terrible.
-    spec = package.__spec__
-    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
-    if reader is None:
-        return None
-    return reader(spec.name)  # type: ignore
-
-
-def resolve(cand):
-    # type: (Package) -> types.ModuleType
-    return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
-    # type: (Package) -> types.ModuleType
-    """Take a package name or module object and return the module.
-
-    Raise an exception if the resolved module is not a package.
-    """
-    resolved = resolve(package)
-    if wrap_spec(resolved).submodule_search_locations is None:
-        raise TypeError(f'{package!r} is not a package')
-    return resolved
-
-
-def from_package(package):
-    """
-    Return a Traversable object for the given package.
-
-    """
-    spec = wrap_spec(package)
-    reader = spec.loader.get_resource_reader(spec.name)
-    return reader.files()
-
-
-@contextlib.contextmanager
-def _tempfile(reader, suffix=''):
-    # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
-    # blocks due to the need to close the temporary file to work on Windows
-    # properly.
-    fd, raw_path = tempfile.mkstemp(suffix=suffix)
-    try:
-        try:
-            os.write(fd, reader())
-        finally:
-            os.close(fd)
-        del reader
-        yield pathlib.Path(raw_path)
-    finally:
-        try:
-            os.remove(raw_path)
-        except FileNotFoundError:
-            pass
-
-
-@functools.singledispatch
-def as_file(path):
-    """
-    Given a Traversable object, return that object as a
-    path on the local file system in a context manager.
-    """
-    return _tempfile(path.read_bytes, suffix=path.name)
-
-
-@as_file.register(pathlib.Path)
-@contextlib.contextmanager
-def _(path):
-    """
-    Degenerate behavior for pathlib.Path objects.
-    """
-    yield path
diff --git a/spaces/tomg-group-umd/lm-watermarking/homoglyphs.py b/spaces/tomg-group-umd/lm-watermarking/homoglyphs.py deleted file mode 100644 index 925279eb3455eb63e36aea12c97ae9b685a15652..0000000000000000000000000000000000000000 --- a/spaces/tomg-group-umd/lm-watermarking/homoglyphs.py +++ /dev/null @@ -1,265 +0,0 @@
-"""Updated version of core.py from
-https://github.com/yamatt/homoglyphs/tree/main/homoglyphs_fork
-for modern python3
-"""
-
-from collections import defaultdict
-import json
-from itertools import product
-import os
-import unicodedata
-
-# Actions if char not in alphabet
-STRATEGY_LOAD = 1  # load category for this char
-STRATEGY_IGNORE = 2  # add char to result
-STRATEGY_REMOVE = 3  # remove char from result
-
-ASCII_RANGE = range(128)
-
-
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-DATA_LOCATION = os.path.join(CURRENT_DIR, "homoglyph_data")
-
-
-class Categories:
-    """
-    Work with aliases from ISO 15924.
-    https://en.wikipedia.org/wiki/ISO_15924#List_of_codes
-    """
-
-    fpath = os.path.join(DATA_LOCATION, "categories.json")
-
-    @classmethod
-    def _get_ranges(cls, categories):
-        """
-        :return: iter: (start code, end code)
-        :rtype: list
-        """
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-
-        for category in categories:
-            if category not in data["aliases"]:
-                raise ValueError("Invalid category: {}".format(category))
-
-        for point in data["points"]:
-            if point[2] in categories:
-                yield point[:2]
-
-    @classmethod
-    def get_alphabet(cls, categories):
-        """
-        :return: set of chars in alphabet by categories list
-        :rtype: set
-        """
-        alphabet = set()
-        for start, end in cls._get_ranges(categories):
-            chars = (chr(code) for code in range(start, end + 1))
-            alphabet.update(chars)
-        return alphabet
-
-    @classmethod
-    def detect(cls, char):
-        """
-        :return: category
-        :rtype: str
-        """
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-
-        # try detect category by unicodedata
-        try:
-            category = unicodedata.name(char).split()[0]
-        except (TypeError, ValueError):
-            # In Python2 unicodedata.name raise error for non-unicode chars
-            # Python3 raise ValueError for non-unicode characters
-            pass
-        else:
-            if category in data["aliases"]:
-                return category
-
-        # try detect category by ranges from JSON file.
-        code = ord(char)
-        for point in data["points"]:
-            if point[0] <= code <= point[1]:
-                return point[2]
-
-    @classmethod
-    def get_all(cls):
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-        return set(data["aliases"])
-
-
-class Languages:
-    fpath = os.path.join(DATA_LOCATION, "languages.json")
-
-    @classmethod
-    def get_alphabet(cls, languages):
-        """
-        :return: set of chars in alphabet by languages list
-        :rtype: set
-        """
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-        alphabet = set()
-        for lang in languages:
-            if lang not in data:
-                raise ValueError("Invalid language code: {}".format(lang))
-            alphabet.update(data[lang])
-        return alphabet
-
-    @classmethod
-    def detect(cls, char):
-        """
-        :return: set of languages which alphabet contains passed char.
-        :rtype: set
-        """
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-        languages = set()
-        for lang, alphabet in data.items():
-            if char in alphabet:
-                languages.add(lang)
-        return languages
-
-    @classmethod
-    def get_all(cls):
-        with open(cls.fpath, encoding="utf-8") as f:
-            data = json.load(f)
-        return set(data.keys())
-
-
-class Homoglyphs:
-    def __init__(
-        self,
-        categories=None,
-        languages=None,
-        alphabet=None,
-        strategy=STRATEGY_IGNORE,
-        ascii_strategy=STRATEGY_IGNORE,
-        ascii_range=ASCII_RANGE,
-    ):
-        # strategies
-        if strategy not in (STRATEGY_LOAD, STRATEGY_IGNORE, STRATEGY_REMOVE):
-            raise ValueError("Invalid strategy")
-        self.strategy = strategy
-        self.ascii_strategy = ascii_strategy
-        self.ascii_range = ascii_range
-
-        # Homoglyphs must be initialized by any alphabet for correct work
-        if not categories and not languages and not alphabet:
-            categories = ("LATIN", "COMMON")
-
-        # cats and langs
-        self.categories = set(categories or [])
-        self.languages = set(languages or [])
-
-        # alphabet
-        self.alphabet = set(alphabet or [])
-        if self.categories:
-            alphabet = Categories.get_alphabet(self.categories)
-            self.alphabet.update(alphabet)
-        if self.languages:
-            alphabet = Languages.get_alphabet(self.languages)
-            self.alphabet.update(alphabet)
-        self.table = self.get_table(self.alphabet)
-
-    @staticmethod
-    def get_table(alphabet):
-        table = defaultdict(set)
-        with open(os.path.join(DATA_LOCATION, "confusables_sept2022.json")) as f:
-            data = json.load(f)
-        for char in alphabet:
-            if char in data:
-                for homoglyph in data[char]:
-                    if homoglyph in alphabet:
-                        table[char].add(homoglyph)
-        return table
-
-    @staticmethod
-    def get_restricted_table(source_alphabet, target_alphabet):
-        table = defaultdict(set)
-        with open(os.path.join(DATA_LOCATION, "confusables_sept2022.json")) as f:
-            data = json.load(f)
-        for char in source_alphabet:
-            if char in data:
-                for homoglyph in data[char]:
-                    if homoglyph in target_alphabet:
-                        table[char].add(homoglyph)
-        return table
-
-    @staticmethod
-    def uniq_and_sort(data):
-        result = list(set(data))
-        result.sort(key=lambda x: (-len(x), x))
-        return result
-
-    def _update_alphabet(self, char):
-        # try detect languages
-        langs = Languages.detect(char)
-        if langs:
-            self.languages.update(langs)
-            alphabet = Languages.get_alphabet(langs)
-            self.alphabet.update(alphabet)
-        else:
-            # try detect categories
-            category = Categories.detect(char)
-            if category is None:
-                return False
-            self.categories.add(category)
-            alphabet = Categories.get_alphabet([category])
-            self.alphabet.update(alphabet)
-        # update table for new alphabet
-        self.table = self.get_table(self.alphabet)
-        return True
-
-    def _get_char_variants(self, char):
-        if char not in self.alphabet:
-            if self.strategy == STRATEGY_LOAD:
-                if not self._update_alphabet(char):
-                    return []
-            elif self.strategy == STRATEGY_IGNORE:
-                return [char]
-            elif self.strategy == STRATEGY_REMOVE:
-                return []
-
-        # find alternative chars for current char
-        alt_chars = self.table.get(char, set())
-        if alt_chars:
-            # find alternative chars for alternative chars for current char
-            alt_chars2 = [self.table.get(alt_char, set()) for alt_char in alt_chars]
-            # combine all alternatives
-            alt_chars.update(*alt_chars2)
-            # add current char to alternatives
-            alt_chars.add(char)
-
-        # uniq, sort and return
-        return self.uniq_and_sort(alt_chars)
-
-    def _get_combinations(self, text, ascii=False):
-        variations = []
-        for char in text:
-            alt_chars = self._get_char_variants(char)
-
-            if ascii:
-                alt_chars = [char for
char in alt_chars if ord(char) in self.ascii_range] - if not alt_chars and self.ascii_strategy == STRATEGY_IGNORE: - return - - if alt_chars: - variations.append(alt_chars) - if variations: - for variant in product(*variations): - yield "".join(variant) - - def get_combinations(self, text): - return list(self._get_combinations(text)) - - def _to_ascii(self, text): - for variant in self._get_combinations(text, ascii=True): - if max(map(ord, variant)) in self.ascii_range: - yield variant - - def to_ascii(self, text): - return self.uniq_and_sort(self._to_ascii(text)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py deleted file mode 100644 index 14c1eb2881478f5db95e413446f9cd86b3b6ca29..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch')) -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=3665, - warmup_ratio=1.0 / 80, - step=[17, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py deleted file mode 100644 index 15786b3dca3a8f13a79b9a80560f2277990a8089..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py +++ /dev/null @@ -1,107 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.runner import BaseModule - -from mmdet.models.builder import HEADS -from ...core import bbox_cxcywh_to_xyxy - - -@HEADS.register_module() -class EmbeddingRPNHead(BaseModule): - """RPNHead in the `Sparse R-CNN `_ . - - Unlike traditional RPNHead, this module does not need FPN input, but just - decode `init_proposal_bboxes` and expand the first dimension of - `init_proposal_bboxes` and `init_proposal_features` to the batch_size. - - Args: - num_proposals (int): Number of init_proposals. Default 100. - proposal_feature_channel (int): Channel number of - init_proposal_feature. Defaults to 256. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - num_proposals=100, - proposal_feature_channel=256, - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(EmbeddingRPNHead, self).__init__(init_cfg) - self.num_proposals = num_proposals - self.proposal_feature_channel = proposal_feature_channel - self._init_layers() - - def _init_layers(self): - """Initialize a sparse set of proposal boxes and proposal features.""" - self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) - self.init_proposal_features = nn.Embedding( - self.num_proposals, self.proposal_feature_channel) - - def init_weights(self): - """Initialize the init_proposal_bboxes as normalized. - - [c_x, c_y, w, h], and we initialize it to the size of the entire - image. - """ - super(EmbeddingRPNHead, self).init_weights() - nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) - nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) - - def _decode_init_proposals(self, imgs, img_metas): - """Decode init_proposal_bboxes according to the size of images and - expand dimension of init_proposal_features to batch_size. - - Args: - imgs (list[Tensor]): List of FPN features. - img_metas (list[dict]): List of meta-information of - images. Need the img_shape to decode the init_proposals. - - Returns: - Tuple(Tensor): - - - proposals (Tensor): Decoded proposal bboxes, - has shape (batch_size, num_proposals, 4). - - init_proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel). - - imgs_whwh (Tensor): Tensor with shape - (batch_size, 4), the dimension means - [img_width, img_height, img_width, img_height]. - """ - proposals = self.init_proposal_bboxes.weight.clone() - proposals = bbox_cxcywh_to_xyxy(proposals) - num_imgs = len(imgs[0]) - imgs_whwh = [] - for meta in img_metas: - h, w, _ = meta['img_shape'] - imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) - imgs_whwh = torch.cat(imgs_whwh, dim=0) - imgs_whwh = imgs_whwh[:, None, :] - - # imgs_whwh has shape (batch_size, 1, 4) - # The shape of proposals change from (num_proposals, 4) - # to (batch_size ,num_proposals, 4) - proposals = proposals * imgs_whwh - - init_proposal_features = self.init_proposal_features.weight.clone() - init_proposal_features = init_proposal_features[None].expand( - num_imgs, *init_proposal_features.size()) - return proposals, init_proposal_features, imgs_whwh - - def forward_dummy(self, img, img_metas): - """Dummy forward function. - - Used in flops calculation. 
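A worked sketch of the decoding that _decode_init_proposals performs: normalized [cx, cy, w, h] proposals are converted to [x1, y1, x2, y2] and scaled per image by [img_w, img_h, img_w, img_h]. The helper name and shapes below are made up for this example:

```python
import torch

def cxcywh_to_xyxy_scaled(proposals, img_shapes):
    # proposals: (num_proposals, 4) normalized [cx, cy, w, h]
    cx, cy, w, h = proposals.unbind(-1)
    xyxy = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    # img_shapes: one (height, width) tuple per image in the batch
    whwh = torch.tensor([[wd, ht, wd, ht] for (ht, wd) in img_shapes],
                        dtype=xyxy.dtype)
    return xyxy[None] * whwh[:, None, :]   # (batch, num_proposals, 4)

# 100 proposals covering the full image, as init_weights() above sets up:
props = torch.tensor([[0.5, 0.5, 1.0, 1.0]]).repeat(100, 1)
boxes = cxcywh_to_xyxy_scaled(props, [(480, 640)])  # pixel-space boxes
```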
- """ - return self._decode_init_proposals(img, img_metas) - - def forward_train(self, img, img_metas): - """Forward function in training stage.""" - return self._decode_init_proposals(img, img_metas) - - def simple_test_rpn(self, img, img_metas): - """Forward function in testing stage.""" - return self._decode_init_proposals(img, img_metas) diff --git a/spaces/tsi-org/LLaVA/llava/eval/webpage/styles.css b/spaces/tsi-org/LLaVA/llava/eval/webpage/styles.css deleted file mode 100644 index 7b6d6fc69b336c0a5d103be9fb13a0e0897c76a3..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/LLaVA/llava/eval/webpage/styles.css +++ /dev/null @@ -1,105 +0,0 @@ -body { - font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; - background-color: #f8f9fa; -} - -.navbar-dark .navbar-nav .nav-link { - color: #f1cf68; - font-size: 1.1rem; - padding: 0.5rem 0.6rem; -} - -.card-header { - font-weight: bold; -} - -.card { - box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); - transition: 0.3s; -} - -.card:hover { - box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); -} - -button { - transition: background-color 0.3s; -} - -button:hover { - background-color: #007bff; -} - -@media (max-width: 767px) { - .form-row .form-group { - margin-bottom: 10px; - } -} - -/* Extra styles */ - -.expandable-card .card-text-container { - max-height: 200px; - overflow-y: hidden; - position: relative; -} - -.expandable-card.expanded .card-text-container { - max-height: none; -} - -.expand-btn { - position: relative; - display: none; - background-color: rgba(255, 255, 255, 0.8); - color: #510c75; - border-color: transparent; -} - -.expand-btn:hover { - background-color: rgba(200, 200, 200, 0.8); - text-decoration: none; - border-color: transparent; - color: #510c75; -} - -.expand-btn:focus { - outline: none; - text-decoration: none; -} - -.expandable-card:not(.expanded) .card-text-container:after { - content: ""; - position: absolute; - bottom: 0; - left: 0; - width: 100%; - height: 90px; - background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1)); -} - -.expandable-card:not(.expanded) .expand-btn { - margin-top: -40px; -} - -.card-body { - padding-bottom: 5px; -} - -.vertical-flex-layout { - justify-content: center; - align-items: center; - height: 100%; - display: flex; - flex-direction: column; - gap: 5px; -} - -.figure-img { - max-width: 100%; - height: auto; -} - -.adjustable-font-size { - font-size: calc(0.5rem + 2vw); -} diff --git a/spaces/ttt246/brain/Brain/src/model/sms_model.py b/spaces/ttt246/brain/Brain/src/model/sms_model.py deleted file mode 100644 index db840d5b59983606fdb1e2a0302e3dbaa8870a8c..0000000000000000000000000000000000000000 --- a/spaces/ttt246/brain/Brain/src/model/sms_model.py +++ /dev/null @@ -1,16 +0,0 @@ -"""sms message model includes from, to and body""" -from typing import Any - -from Brain.src.model.requests.request_model import SendSMS - - -class SMSModel: - def __init__(self, _from="", _to="", body=""): - self._from = _from - self._to = _to - self.body = body - - def get_sms_model(self, data: SendSMS.Body) -> None: - self._from = data._from - self._to = data.to - self.body = data.body diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/download_tool.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/download_tool.py deleted file mode 100644 index 211dc4efa115e9504af7beaa11a94fe0d6e87b46..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/download_tool.py +++ 
/dev/null @@ -1,77 +0,0 @@ -import mmap -import os -import requests -from hashlib import sha1 - - -# def download_file(url, fp): -# r = requests.get(url) -# assert r.status_code == 200, f'Error! Download failure. URL: {url} OUT: {fp}' -# with open(fp, "wb") as f: -# f.write(r.content) - - -def sha1_check(fp, sha1_code): - f = open(fp, 'rb') - sha1_obj = sha1(mmap.mmap(f.fileno(), os.path.getsize(fp), access=mmap.ACCESS_READ)) - f.close() - return sha1_obj.hexdigest() == sha1_code.lower() - - -def download_file(url, fp, sha1_code=None, proxies=None): - ''' - 支持断点续传的下载函数 - :param url: - :param fp: - :param sha1_code: - :param proxies: - :return: - ''' - if proxies is None: - proxies = {} - - r = requests.get(url, stream=True) - - if 'Content-Length' in r.headers: - size = int(r.headers['Content-Length']) - assert size >= 0, 'Error! Bad file size.' - else: - size = None - - r.close() - - if size is None and os.path.isfile(fp): - # 目标不支持没有已知的大小,删掉现有的然后重新下载 - print('Info! The downloaded file has no explicit size. Will delete and download it again.') - os.unlink(fp) - - downloaded_size = 0 - if os.path.isfile(fp): - downloaded_size = os.path.getsize(fp) - - headers = {} - if downloaded_size != 0 and size is not None: - if downloaded_size == size: - # 目标已下载完成 - if sha1_code is not None: - if sha1_check(fp, sha1_code): - return - else: - os.unlink(fp) - downloaded_size = 0 - print('Info! The downloaded file is corrupt, download it again.') - headers.update(dict(Range=f'bytes={downloaded_size}-')) - - try: - r = requests.get(url, stream=True, headers=headers, proxies=proxies) - with open(fp, 'ab') as f: - for chunk in r.iter_content(chunk_size=1024**2): - downloaded_size += len(chunk) - f.write(chunk) - f.flush() - except Exception as e: - print(e) - raise f'Error! Download failure. URL: {url} OUT: {fp}' - - if sha1_code is not None and not sha1_check(fp, sha1_code): - raise AssertionError('Error! 
The downloaded file is corrupt, please download it again.') diff --git a/spaces/ucalyptus/PTI/models/e4e/psp.py b/spaces/ucalyptus/PTI/models/e4e/psp.py deleted file mode 100644 index bf9f75dbaa66997abfc1e3e0e4f19ddfec7fedac..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/models/e4e/psp.py +++ /dev/null @@ -1,97 +0,0 @@ -import matplotlib -from configs import paths_config -matplotlib.use('Agg') -import torch -from torch import nn -from models.e4e.encoders import psp_encoders -from models.e4e.stylegan2.model import Generator - - -def get_keys(d, name): - if 'state_dict' in d: - d = d['state_dict'] - d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} - return d_filt - - -class pSp(nn.Module): - - def __init__(self, opts): - super(pSp, self).__init__() - self.opts = opts - # Define architecture - self.encoder = self.set_encoder() - self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2) - self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - # Load weights if needed - self.load_weights() - - def set_encoder(self): - if self.opts.encoder_type == 'GradualStyleEncoder': - encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts) - elif self.opts.encoder_type == 'Encoder4Editing': - encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts) - else: - raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type)) - return encoder - - def load_weights(self): - if self.opts.checkpoint_path is not None: - print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path)) - ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') - self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) - self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) - self.__load_latent_avg(ckpt) - else: - print('Loading encoders weights from irse50!') - encoder_ckpt = torch.load(paths_config.ir_se50) - self.encoder.load_state_dict(encoder_ckpt, strict=False) - print('Loading decoder weights from pretrained!') - ckpt = torch.load(self.opts.stylegan_weights) - self.decoder.load_state_dict(ckpt['g_ema'], strict=False) - self.__load_latent_avg(ckpt, repeat=self.encoder.style_count) - - def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True, - inject_latent=None, return_latents=False, alpha=None): - if input_code: - codes = x - else: - codes = self.encoder(x) - # normalize with respect to the center of an average face - if self.opts.start_from_latent_avg: - if codes.ndim == 2: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :] - else: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1) - - if latent_mask is not None: - for i in latent_mask: - if inject_latent is not None: - if alpha is not None: - codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i] - else: - codes[:, i] = inject_latent[:, i] - else: - codes[:, i] = 0 - - input_is_latent = not input_code - images, result_latent = self.decoder([codes], - input_is_latent=input_is_latent, - randomize_noise=randomize_noise, - return_latents=return_latents) - - if resize: - images = self.face_pool(images) - - if return_latents: - return images, result_latent - else: - return images - - def __load_latent_avg(self, ckpt, repeat=None): - if 'latent_avg' in ckpt: - self.latent_avg = ckpt['latent_avg'].to(self.opts.device) - if repeat is not None: - self.latent_avg = self.latent_avg.repeat(repeat, 1) - else: - self.latent_avg = None diff --git 
a/spaces/usbethFlerru/sovits-modelsV2/example/!FREE! Naruto To Boruto Shinobi Striker-3DM _BEST_ Crackl.md b/spaces/usbethFlerru/sovits-modelsV2/example/!FREE! Naruto To Boruto Shinobi Striker-3DM _BEST_ Crackl.md deleted file mode 100644 index f761b6a96f0b1d31f2fea79bf1ece6964d2835b6..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/!FREE! Naruto To Boruto Shinobi Striker-3DM _BEST_ Crackl.md +++ /dev/null @@ -1,6 +0,0 @@ -

                !FREE! Naruto To Boruto Shinobi Striker-3DM Crackl


                DOWNLOAD ►►►►► https://urlcod.com/2uyVIJ



                -
                - aaccfb2cb3
                -
                -
                -

diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Anji Movie English Sub Download Watch the Epic Telugu Fantasy Film Online.md b/spaces/usbethFlerru/sovits-modelsV2/example/Anji Movie English Sub Download Watch the Epic Telugu Fantasy Film Online.md deleted file mode 100644 index 0409a5eaa30afb34edc0131e9497367975bd838c..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Anji Movie English Sub Download Watch the Epic Telugu Fantasy Film Online.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Anji Movie English Sub Download


                Download ★★★★★ https://urlcod.com/2uyX6b



                - - aaccfb2cb3
                -
                -
                -

                diff --git a/spaces/usernamelsp/QQsign/README.md b/spaces/usernamelsp/QQsign/README.md deleted file mode 100644 index 3042be806844c4b6d92719e8afaa17d09c970d46..0000000000000000000000000000000000000000 --- a/spaces/usernamelsp/QQsign/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: QQsign -emoji: 🦀 -colorFrom: blue -colorTo: purple -sdk: docker -pinned: false -license: mit -duplicated_from: CikeyQI/QQsign ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/vaibhavsharda/semantic_clustering/twc_embeddings.py b/spaces/vaibhavsharda/semantic_clustering/twc_embeddings.py deleted file mode 100644 index 4529381e749e50255bb276427fc39f0cdd5cf6da..0000000000000000000000000000000000000000 --- a/spaces/vaibhavsharda/semantic_clustering/twc_embeddings.py +++ /dev/null @@ -1,407 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -from transformers import AutoModelForCausalLM -from scipy.spatial.distance import cosine -import argparse -import json -import pdb -import torch -import torch.nn.functional as F - -def read_text(input_file): - arr = open(input_file).read().split("\n") - return arr[:-1] - - -class CausalLMModel: - def __init__(self): - self.model = None - self.tokenizer = None - self.debug = False - print("In CausalLMModel Constructor") - - def init_model(self,model_name = None): - # Get our models - The package will take care of downloading the models automatically - # For best performance: Muennighoff/SGPT-5.8B-weightedmean-nli-bitfit - if (self.debug): - print("Init model",model_name) - # For best performance: EleutherAI/gpt-j-6B - if (model_name is None): - model_name = "EleutherAI/gpt-neo-125M" - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.model = AutoModelForCausalLM.from_pretrained(model_name) - self.model.eval() - self.prompt = 'Documents are searched to find matches with the same content.\nThe document "{}" is a good search result for "' - - def compute_embeddings(self,input_file_name,input_data,is_file): - if (self.debug): - print("Computing embeddings for:", input_data[:20]) - model = self.model - tokenizer = self.tokenizer - - texts = read_text(input_data) if is_file == True else input_data - query = texts[0] - docs = texts[1:] - - # Tokenize input texts - - #print(f"Query: {query}") - scores = [] - for doc in docs: - context = self.prompt.format(doc) - - context_enc = tokenizer.encode(context, add_special_tokens=False) - continuation_enc = tokenizer.encode(query, add_special_tokens=False) - # Slice off the last token, as we take its probability from the one before - model_input = torch.tensor(context_enc+continuation_enc[:-1]) - continuation_len = len(continuation_enc) - input_len, = model_input.shape - - # [seq_len] -> [seq_len, vocab] - logprobs = torch.nn.functional.log_softmax(model(model_input)[0], dim=-1).cpu() - # [seq_len, vocab] -> [continuation_len, vocab] - logprobs = logprobs[input_len-continuation_len:] - # Gather the log probabilities of the continuation tokens -> [continuation_len] - logprobs = torch.gather(logprobs, 1, torch.tensor(continuation_enc).unsqueeze(-1)).squeeze(-1) - score = torch.sum(logprobs) - scores.append(score.tolist()) - return texts,scores - - def output_results(self,output_file,texts,scores,main_index = 0): - cosine_dict = {} - docs = texts[1:] - if (self.debug): - print("Total sentences",len(texts)) - assert(len(scores) == len(docs)) - for i in range(len(docs)): - cosine_dict[docs[i]] = scores[i] 
- - if (self.debug): - print("Input sentence:",texts[main_index]) - sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True)) - if (self.debug): - for key in sorted_dict: - print("Document score for \"%s\" is: %.3f" % (key[:100], sorted_dict[key])) - if (output_file is not None): - with open(output_file,"w") as fp: - fp.write(json.dumps(sorted_dict,indent=0)) - return sorted_dict - - -class SGPTQnAModel: - def __init__(self): - self.model = None - self.tokenizer = None - self.debug = False - print("In SGPT Q&A Constructor") - - - def init_model(self,model_name = None): - # Get our models - The package will take care of downloading the models automatically - # For best performance: Muennighoff/SGPT-5.8B-weightedmean-nli-bitfit - if (self.debug): - print("Init model",model_name) - if (model_name is None): - model_name = "Muennighoff/SGPT-125M-weightedmean-msmarco-specb-bitfit" - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.model = AutoModel.from_pretrained(model_name) - self.model.eval() - self.SPECB_QUE_BOS = self.tokenizer.encode("[", add_special_tokens=False)[0] - self.SPECB_QUE_EOS = self.tokenizer.encode("]", add_special_tokens=False)[0] - - self.SPECB_DOC_BOS = self.tokenizer.encode("{", add_special_tokens=False)[0] - self.SPECB_DOC_EOS = self.tokenizer.encode("}", add_special_tokens=False)[0] - - - def tokenize_with_specb(self,texts, is_query): - # Tokenize without padding - batch_tokens = self.tokenizer(texts, padding=False, truncation=True) - # Add special brackets & pay attention to them - for seq, att in zip(batch_tokens["input_ids"], batch_tokens["attention_mask"]): - if is_query: - seq.insert(0, self.SPECB_QUE_BOS) - seq.append(self.SPECB_QUE_EOS) - else: - seq.insert(0, self.SPECB_DOC_BOS) - seq.append(self.SPECB_DOC_EOS) - att.insert(0, 1) - att.append(1) - # Add padding - batch_tokens = self.tokenizer.pad(batch_tokens, padding=True, return_tensors="pt") - return batch_tokens - - def get_weightedmean_embedding(self,batch_tokens, model): - # Get the embeddings - with torch.no_grad(): - # Get hidden state of shape [bs, seq_len, hid_dim] - last_hidden_state = self.model(**batch_tokens, output_hidden_states=True, return_dict=True).last_hidden_state - - # Get weights of shape [bs, seq_len, hid_dim] - weights = ( - torch.arange(start=1, end=last_hidden_state.shape[1] + 1) - .unsqueeze(0) - .unsqueeze(-1) - .expand(last_hidden_state.size()) - .float().to(last_hidden_state.device) - ) - - # Get attn mask of shape [bs, seq_len, hid_dim] - input_mask_expanded = ( - batch_tokens["attention_mask"] - .unsqueeze(-1) - .expand(last_hidden_state.size()) - .float() - ) - - # Perform weighted mean pooling across seq_len: bs, seq_len, hidden_dim -> bs, hidden_dim - sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded * weights, dim=1) - sum_mask = torch.sum(input_mask_expanded * weights, dim=1) - - embeddings = sum_embeddings / sum_mask - - return embeddings - - def compute_embeddings(self,input_file_name,input_data,is_file): - if (self.debug): - print("Computing embeddings for:", input_data[:20]) - model = self.model - tokenizer = self.tokenizer - - texts = read_text(input_data) if is_file == True else input_data - - queries = [texts[0]] - docs = texts[1:] - query_embeddings = self.get_weightedmean_embedding(self.tokenize_with_specb(queries, is_query=True), self.model) - doc_embeddings = self.get_weightedmean_embedding(self.tokenize_with_specb(docs, is_query=False), self.model) - return texts,(query_embeddings,doc_embeddings) - - 
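A tiny numeric illustration of the position-weighted mean pooling in get_weightedmean_embedding above: token weights grow linearly (1, 2, ..., seq_len), so the sentence vector is biased toward later tokens. The values are made up:

```python
import torch

last_hidden = torch.tensor([[[1.0, 0.0],
                             [0.0, 1.0],
                             [1.0, 1.0]]])   # (bs=1, seq_len=3, hid=2)
mask = torch.tensor([[1.0, 1.0, 1.0]])       # no padding tokens

weights = torch.arange(1, last_hidden.shape[1] + 1).view(1, -1, 1).float()
mask_exp = mask.unsqueeze(-1)

emb = (last_hidden * mask_exp * weights).sum(1) / (mask_exp * weights).sum(1)
print(emb)  # tensor([[0.6667, 0.8333]]): e.g. (1*1 + 2*0 + 3*1) / (1+2+3)
```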
- - def output_results(self,output_file,texts,embeddings,main_index = 0): - # Calculate cosine similarities - # Cosine similarities are in [-1, 1]. Higher means more similar - query_embeddings = embeddings[0] - doc_embeddings = embeddings[1] - cosine_dict = {} - queries = [texts[0]] - docs = texts[1:] - if (self.debug): - print("Total sentences",len(texts)) - for i in range(len(docs)): - cosine_dict[docs[i]] = 1 - cosine(query_embeddings[0], doc_embeddings[i]) - - if (self.debug): - print("Input sentence:",texts[main_index]) - sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True)) - if (self.debug): - for key in sorted_dict: - print("Cosine similarity with \"%s\" is: %.3f" % (key, sorted_dict[key])) - if (output_file is not None): - with open(output_file,"w") as fp: - fp.write(json.dumps(sorted_dict,indent=0)) - return sorted_dict - - -class SimCSEModel: - def __init__(self): - self.model = None - self.tokenizer = None - self.debug = False - print("In SimCSE constructor") - - def init_model(self,model_name = None): - if (model_name == None): - model_name = "princeton-nlp/sup-simcse-roberta-large" - #self.model = SimCSE(model_name) - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.model = AutoModel.from_pretrained(model_name) - - def compute_embeddings(self,input_file_name,input_data,is_file): - texts = read_text(input_data) if is_file == True else input_data - inputs = self.tokenizer(texts, padding=True, truncation=True, return_tensors="pt") - with torch.no_grad(): - embeddings = self.model(**inputs, output_hidden_states=True, return_dict=True).pooler_output - return texts,embeddings - - def output_results(self,output_file,texts,embeddings,main_index = 0): - # Calculate cosine similarities - # Cosine similarities are in [-1, 1]. 
Higher means more similar - cosine_dict = {} - #print("Total sentences",len(texts)) - for i in range(len(texts)): - cosine_dict[texts[i]] = 1 - cosine(embeddings[main_index], embeddings[i]) - - #print("Input sentence:",texts[main_index]) - sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True)) - if (self.debug): - for key in sorted_dict: - print("Cosine similarity with \"%s\" is: %.3f" % (key, sorted_dict[key])) - if (output_file is not None): - with open(output_file,"w") as fp: - fp.write(json.dumps(sorted_dict,indent=0)) - return sorted_dict - - - -class SGPTModel: - def __init__(self): - self.model = None - self.tokenizer = None - self.debug = False - print("In SGPT Constructor") - - - def init_model(self,model_name = None): - # Get our models - The package will take care of downloading the models automatically - # For best performance: Muennighoff/SGPT-5.8B-weightedmean-nli-bitfit - if (self.debug): - print("Init model",model_name) - if (model_name is None): - model_name = "Muennighoff/SGPT-125M-weightedmean-nli-bitfit" - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.model = AutoModel.from_pretrained(model_name) - #self.tokenizer = AutoTokenizer.from_pretrained("Muennighoff/SGPT-1.3B-weightedmean-msmarco-specb-bitfit") - #self.model = AutoModel.from_pretrained("Muennighoff/SGPT-1.3B-weightedmean-msmarco-specb-bitfit") - #self.tokenizer = AutoTokenizer.from_pretrained("Muennighoff/SGPT-5.8B-weightedmean-msmarco-specb-bitfit") - #self.model = AutoModel.from_pretrained("Muennighoff/SGPT-5.8B-weightedmean-msmarco-specb-bitfit") - # Deactivate Dropout (There is no dropout in the above models so it makes no difference here but other SGPT models may have dropout) - self.model.eval() - - def compute_embeddings(self,input_file_name,input_data,is_file): - if (self.debug): - print("Computing embeddings for:", input_data[:20]) - model = self.model - tokenizer = self.tokenizer - - texts = read_text(input_data) if is_file == True else input_data - - # Tokenize input texts - batch_tokens = tokenizer(texts, padding=True, truncation=True, return_tensors="pt") - - # Get the embeddings - with torch.no_grad(): - # Get hidden state of shape [bs, seq_len, hid_dim] - last_hidden_state = model(**batch_tokens, output_hidden_states=True, return_dict=True).last_hidden_state - - # Get weights of shape [bs, seq_len, hid_dim] - weights = ( - torch.arange(start=1, end=last_hidden_state.shape[1] + 1) - .unsqueeze(0) - .unsqueeze(-1) - .expand(last_hidden_state.size()) - .float().to(last_hidden_state.device) - ) - - # Get attn mask of shape [bs, seq_len, hid_dim] - input_mask_expanded = ( - batch_tokens["attention_mask"] - .unsqueeze(-1) - .expand(last_hidden_state.size()) - .float() - ) - - # Perform weighted mean pooling across seq_len: bs, seq_len, hidden_dim -> bs, hidden_dim - sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded * weights, dim=1) - sum_mask = torch.sum(input_mask_expanded * weights, dim=1) - - embeddings = sum_embeddings / sum_mask - return texts,embeddings - - def output_results(self,output_file,texts,embeddings,main_index = 0): - # Calculate cosine similarities - # Cosine similarities are in [-1, 1]. 
Higher means more similar - cosine_dict = {} - if (self.debug): - print("Total sentences",len(texts)) - for i in range(len(texts)): - cosine_dict[texts[i]] = 1 - cosine(embeddings[main_index], embeddings[i]) - - if (self.debug): - print("Input sentence:",texts[main_index]) - sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True)) - if (self.debug): - for key in sorted_dict: - print("Cosine similarity with \"%s\" is: %.3f" % (key, sorted_dict[key])) - if (output_file is not None): - with open(output_file,"w") as fp: - fp.write(json.dumps(sorted_dict,indent=0)) - return sorted_dict - - - - - -class HFModel: - def __init__(self): - self.model = None - self.tokenizer = None - self.debug = False - print("In HF Constructor") - - - def init_model(self,model_name = None): - # Get our models - The package will take care of downloading the models automatically - # For best performance: Muennighoff/SGPT-5.8B-weightedmean-nli-bitfit - #print("Init model",model_name) - if (model_name is None): - model_name = "sentence-transformers/all-MiniLM-L6-v2" - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.model = AutoModel.from_pretrained(model_name) - self.model.eval() - - def mean_pooling(self,model_output, attention_mask): - token_embeddings = model_output[0] #First element of model_output contains all token embeddings - input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - def compute_embeddings(self,input_file_name,input_data,is_file): - #print("Computing embeddings for:", input_data[:20]) - model = self.model - tokenizer = self.tokenizer - - texts = read_text(input_data) if is_file == True else input_data - - encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors='pt') - - # Compute token embeddings - with torch.no_grad(): - model_output = model(**encoded_input) - - # Perform pooling - sentence_embeddings = self.mean_pooling(model_output, encoded_input['attention_mask']) - - # Normalize embeddings - sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1) - - return texts,sentence_embeddings - - def output_results(self,output_file,texts,embeddings,main_index = 0): - # Calculate cosine similarities - # Cosine similarities are in [-1, 1]. 
Higher means more similar - cosine_dict = {} - #print("Total sentences",len(texts)) - for i in range(len(texts)): - cosine_dict[texts[i]] = 1 - cosine(embeddings[main_index], embeddings[i]) - - #print("Input sentence:",texts[main_index]) - sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True)) - if (self.debug): - for key in sorted_dict: - print("Cosine similarity with \"%s\" is: %.3f" % (key, sorted_dict[key])) - if (output_file is not None): - with open(output_file,"w") as fp: - fp.write(json.dumps(sorted_dict,indent=0)) - return sorted_dict - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='SGPT model for sentence embeddings ',formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-input', action="store", dest="input",required=True,help="Input file with sentences") - parser.add_argument('-output', action="store", dest="output",default="output.txt",help="Output file with results") - parser.add_argument('-model', action="store", dest="model",default="sentence-transformers/all-MiniLM-L6-v2",help="model name") - - results = parser.parse_args() - obj = HFModel() - obj.init_model(results.model) - texts, embeddings = obj.compute_embeddings(results.input,results.input,is_file = True) - results = obj.output_results(results.output,texts,embeddings) diff --git a/spaces/valhalla/glide-text2im/glide_text2im/unet.py b/spaces/valhalla/glide-text2im/glide_text2im/unet.py deleted file mode 100644 index b61437a44ef7510e0c62afaae070deabc24c42bb..0000000000000000000000000000000000000000 --- a/spaces/valhalla/glide-text2im/glide_text2im/unet.py +++ /dev/null @@ -1,635 +0,0 @@ -import math -from abc import abstractmethod - -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from .fp16_util import convert_module_to_f16, convert_module_to_f32 -from .nn import avg_pool_nd, conv_nd, linear, normalization, timestep_embedding, zero_module - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, encoder_out=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, AttentionBlock): - x = layer(x, encoder_out) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. 
- """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest") - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. 
- """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels, swish=1.0), - nn.Identity(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), - nn.SiLU() if use_scale_shift_norm else nn.Identity(), - nn.Dropout(p=dropout), - zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - encoder_channels=None, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels, swish=0.0) - self.qkv = conv_nd(1, channels, channels * 3, 1) - self.attention = QKVAttention(self.num_heads) - - if encoder_channels is not None: - self.encoder_kv = conv_nd(1, encoder_channels, channels * 2, 1) - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x, encoder_out=None): - b, c, *spatial = x.shape - qkv = self.qkv(self.norm(x).view(b, c, -1)) - if encoder_out is not None: - encoder_out = self.encoder_kv(encoder_out) - h = self.attention(qkv, encoder_out) - else: - h = self.attention(qkv) - h = self.proj_out(h) - return x + h.reshape(b, c, *spatial) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv, encoder_kv=None): - """ - Apply QKV attention. - - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - if encoder_kv is not None: - assert encoder_kv.shape[1] == self.n_heads * ch * 2 - ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1) - k = th.cat([ek, k], dim=-1) - v = th.cat([ev, v], dim=-1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. 
- :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - """ - - def __init__( - self, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - encoder_channels=None, - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - ch = input_ch = int(channel_mult[0] * model_channels) - self.input_blocks = nn.ModuleList( - [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))] - ) - self._feature_size = ch - input_block_chans = [ch] - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=int(mult * model_channels), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(mult * model_channels) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i 
in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=int(model_channels * mult), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(model_channels * mult) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch, swish=1.0), - nn.Identity(), - zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), - ) - self.use_fp16 = use_fp16 - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps, y=None): - """ - Apply the model to an input batch. - - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. - """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - - hs = [] - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - hs.append(h) - h = self.middle_block(h, emb) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb) - h = h.type(x.dtype) - return self.out(h) - -class SuperResUNetModel(UNetModel): - """ - A UNetModel that performs super-resolution. - - Expects an extra kwarg `low_res` to condition on a low-resolution image. - """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 2 - else: - # Curse you, Python. Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 2 - super().__init__(*args, **kwargs) - - def forward(self, x, timesteps, low_res=None, **kwargs): - _, _, new_height, new_width = x.shape - upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") - x = th.cat([x, upsampled], dim=1) - return super().forward(x, timesteps, **kwargs) - - -class InpaintUNetModel(UNetModel): - """ - A UNetModel which can perform inpainting. - """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 2 + 1 - else: - # Curse you, Python. 
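How SuperResUNetModel conditions on the low-resolution image, in isolation: the conditioning image is bilinearly resized to the target resolution and concatenated on the channel axis, which is why in_channels is doubled in __init__ (the inpainting variants below widen the input further with the masked image and mask). The sizes here are arbitrary:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 256, 256)        # noisy high-res sample
low_res = torch.randn(2, 3, 64, 64)    # conditioning image

upsampled = F.interpolate(low_res, (256, 256), mode="bilinear")
model_in = torch.cat([x, upsampled], dim=1)   # (2, 6, 256, 256) -> UNet input
```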
Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 2 + 1 - super().__init__(*args, **kwargs) - - def forward(self, x, timesteps, inpaint_image=None, inpaint_mask=None, **kwargs): - if inpaint_image is None: - inpaint_image = th.zeros_like(x) - if inpaint_mask is None: - inpaint_mask = th.zeros_like(x[:, :1]) - return super().forward( - th.cat([x, inpaint_image * inpaint_mask, inpaint_mask], dim=1), - timesteps, - **kwargs, - ) - - -class SuperResInpaintUNetModel(UNetModel): - """ - A UNetModel which can perform both upsampling and inpainting. - """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 3 + 1 - else: - # Curse you, Python. Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 3 + 1 - super().__init__(*args, **kwargs) - - def forward( - self, - x, - timesteps, - inpaint_image=None, - inpaint_mask=None, - low_res=None, - **kwargs, - ): - if inpaint_image is None: - inpaint_image = th.zeros_like(x) - if inpaint_mask is None: - inpaint_mask = th.zeros_like(x[:, :1]) - _, _, new_height, new_width = x.shape - upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") - return super().forward( - th.cat([x, inpaint_image * inpaint_mask, inpaint_mask, upsampled], dim=1), - timesteps, - **kwargs, - ) diff --git a/spaces/vinthony/SadTalker/src/audio2pose_models/cvae.py b/spaces/vinthony/SadTalker/src/audio2pose_models/cvae.py deleted file mode 100644 index d017ce865a03bae40dfe066dbcd82e29839d89dc..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/audio2pose_models/cvae.py +++ /dev/null @@ -1,149 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn -from src.audio2pose_models.res_unet import ResUnet - -def class2onehot(idx, class_num): - - assert torch.max(idx).item() < class_num - onehot = torch.zeros(idx.size(0), class_num).to(idx.device) - onehot.scatter_(1, idx, 1) - return onehot - -class CVAE(nn.Module): - def __init__(self, cfg): - super().__init__() - encoder_layer_sizes = cfg.MODEL.CVAE.ENCODER_LAYER_SIZES - decoder_layer_sizes = cfg.MODEL.CVAE.DECODER_LAYER_SIZES - latent_size = cfg.MODEL.CVAE.LATENT_SIZE - num_classes = cfg.DATASET.NUM_CLASSES - audio_emb_in_size = cfg.MODEL.CVAE.AUDIO_EMB_IN_SIZE - audio_emb_out_size = cfg.MODEL.CVAE.AUDIO_EMB_OUT_SIZE - seq_len = cfg.MODEL.CVAE.SEQ_LEN - - self.latent_size = latent_size - - self.encoder = ENCODER(encoder_layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len) - self.decoder = DECODER(decoder_layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len) - def reparameterize(self, mu, logvar): - std = torch.exp(0.5 * logvar) - eps = torch.randn_like(std) - return mu + eps * std - - def forward(self, batch): - batch = self.encoder(batch) - mu = batch['mu'] - logvar = batch['logvar'] - z = self.reparameterize(mu, logvar) - batch['z'] = z - return self.decoder(batch) - - def test(self, batch): - ''' - class_id = batch['class'] - z = torch.randn([class_id.size(0), self.latent_size]).to(class_id.device) - batch['z'] = z - ''' - return self.decoder(batch) - -class ENCODER(nn.Module): - def __init__(self, layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len): - super().__init__() - - self.resunet = ResUnet() - self.num_classes = num_classes - self.seq_len = seq_len - - self.MLP = nn.Sequential() - 
layer_sizes[0] += latent_size + seq_len*audio_emb_out_size + 6 - for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])): - self.MLP.add_module( - name="L{:d}".format(i), module=nn.Linear(in_size, out_size)) - self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU()) - - self.linear_means = nn.Linear(layer_sizes[-1], latent_size) - self.linear_logvar = nn.Linear(layer_sizes[-1], latent_size) - self.linear_audio = nn.Linear(audio_emb_in_size, audio_emb_out_size) - - self.classbias = nn.Parameter(torch.randn(self.num_classes, latent_size)) - - def forward(self, batch): - class_id = batch['class'] - pose_motion_gt = batch['pose_motion_gt'] #bs seq_len 6 - ref = batch['ref'] #bs 6 - bs = pose_motion_gt.shape[0] - audio_in = batch['audio_emb'] # bs seq_len audio_emb_in_size - - #pose encode - pose_emb = self.resunet(pose_motion_gt.unsqueeze(1)) #bs 1 seq_len 6 - pose_emb = pose_emb.reshape(bs, -1) #bs seq_len*6 - - #audio mapping - print(audio_in.shape) - audio_out = self.linear_audio(audio_in) # bs seq_len audio_emb_out_size - audio_out = audio_out.reshape(bs, -1) - - class_bias = self.classbias[class_id] #bs latent_size - x_in = torch.cat([ref, pose_emb, audio_out, class_bias], dim=-1) #bs seq_len*(audio_emb_out_size+6)+latent_size - x_out = self.MLP(x_in) - - mu = self.linear_means(x_out) - logvar = self.linear_means(x_out) #bs latent_size - - batch.update({'mu':mu, 'logvar':logvar}) - return batch - -class DECODER(nn.Module): - def __init__(self, layer_sizes, latent_size, num_classes, - audio_emb_in_size, audio_emb_out_size, seq_len): - super().__init__() - - self.resunet = ResUnet() - self.num_classes = num_classes - self.seq_len = seq_len - - self.MLP = nn.Sequential() - input_size = latent_size + seq_len*audio_emb_out_size + 6 - for i, (in_size, out_size) in enumerate(zip([input_size]+layer_sizes[:-1], layer_sizes)): - self.MLP.add_module( - name="L{:d}".format(i), module=nn.Linear(in_size, out_size)) - if i+1 < len(layer_sizes): - self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU()) - else: - self.MLP.add_module(name="sigmoid", module=nn.Sigmoid()) - - self.pose_linear = nn.Linear(6, 6) - self.linear_audio = nn.Linear(audio_emb_in_size, audio_emb_out_size) - - self.classbias = nn.Parameter(torch.randn(self.num_classes, latent_size)) - - def forward(self, batch): - - z = batch['z'] #bs latent_size - bs = z.shape[0] - class_id = batch['class'] - ref = batch['ref'] #bs 6 - audio_in = batch['audio_emb'] # bs seq_len audio_emb_in_size - #print('audio_in: ', audio_in[:, :, :10]) - - audio_out = self.linear_audio(audio_in) # bs seq_len audio_emb_out_size - #print('audio_out: ', audio_out[:, :, :10]) - audio_out = audio_out.reshape([bs, -1]) # bs seq_len*audio_emb_out_size - class_bias = self.classbias[class_id] #bs latent_size - - z = z + class_bias - x_in = torch.cat([ref, z, audio_out], dim=-1) - x_out = self.MLP(x_in) # bs layer_sizes[-1] - x_out = x_out.reshape((bs, self.seq_len, -1)) - - #print('x_out: ', x_out) - - pose_emb = self.resunet(x_out.unsqueeze(1)) #bs 1 seq_len 6 - - pose_motion_pred = self.pose_linear(pose_emb.squeeze(1)) #bs seq_len 6 - - batch.update({'pose_motion_pred':pose_motion_pred}) - return batch diff --git a/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py b/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py deleted file mode 100644 index bfdf16be2a8a834b204c45d88c86857b37b9bd25..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py +++ /dev/null @@ 
diff --git a/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py b/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py deleted file mode 100644 index bfdf16be2a8a834b204c45d88c86857b37b9bd25..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/data/template_dataset.py +++ /dev/null @@ -1,75 +0,0 @@
-"""Dataset class template
-
-This module provides a template for users to implement custom datasets.
-You can specify '--dataset_mode template' to use this dataset.
-The class name should be consistent with both the filename and its dataset_mode option.
-The filename should be <dataset_mode>_dataset.py
-The class name should be <DatasetMode>Dataset
-You need to implement the following functions:
-    -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-    -- <__init__>: Initialize this dataset class.
-    -- <__getitem__>: Return a data point and its metadata information.
-    -- <__len__>: Return the number of images.
-"""
-from data.base_dataset import BaseDataset, get_transform
-# from data.image_folder import make_dataset
-# from PIL import Image
-
-
-class TemplateDataset(BaseDataset):
-    """A template dataset class for you to implement custom datasets."""
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        """Add new dataset-specific options, and rewrite default values for existing options.
-
-        Parameters:
-            parser          -- original option parser
-            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
-        Returns:
-            the modified parser.
-        """
-        parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
-        parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)  # specify dataset-specific default values
-        return parser
-
-    def __init__(self, opt):
-        """Initialize this dataset class.
-
-        Parameters:
-            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
-
-        A few things can be done here.
-        - save the options (have been done in BaseDataset)
-        - get image paths and meta information of the dataset.
-        - define the image transformation.
-        """
-        # save the option and dataset root
-        BaseDataset.__init__(self, opt)
-        # get the image paths of your dataset;
-        self.image_paths = []  # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
-        # define the default transform function. You can use <get_transform>; you can also define your custom transform function
-        self.transform = get_transform(opt)
-
-    def __getitem__(self, index):
-        """Return a data point and its metadata information.
-
-        Parameters:
-            index -- a random integer for data indexing
-
-        Returns:
-            a dictionary of data with their names. It usually contains the data itself and its metadata information.
-
-        Step 1: get a random image path: e.g., path = self.image_paths[index]
-        Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
-        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
-        Step 4: return a data point as a dictionary.
- """ - path = 'temp' # needs to be a string - data_A = None # needs to be a tensor - data_B = None # needs to be a tensor - return {'data_A': data_A, 'data_B': data_B, 'path': path} - - def __len__(self): - """Return the total number of images.""" - return len(self.image_paths) diff --git a/spaces/visheratin/laion-nllb/lang_map.py b/spaces/visheratin/laion-nllb/lang_map.py deleted file mode 100644 index ddbbfef0cf3fedb10b18d9d2ae9993efd73e1e96..0000000000000000000000000000000000000000 --- a/spaces/visheratin/laion-nllb/lang_map.py +++ /dev/null @@ -1,203 +0,0 @@ -langs = { - "Acehnese (Arabic script)": "ace_Arab", - "Acehnese (Latin script)": "ace_Latn", - "Mesopotamian Arabic": "acm_Arab", - "Ta'izzi-Adeni Arabic": "acq_Arab", - "Tunisian Arabic": "aeb_Arab", - "Afrikaans": "afr_Latn", - "South Levantine Arabic": "ajp_Arab", - "Akan": "aka_Latn", - "Amharic": "amh_Ethi", - "North Levantine Arabic": "apc_Arab", - "Modern Standard Arabic": "arb_Arab", - "Najdi Arabic": "ars_Arab", - "Moroccan Arabic": "ary_Arab", - "Egyptian Arabic": "arz_Arab", - "Assamese": "asm_Beng", - "Asturian": "ast_Latn", - "Awadhi": "awa_Deva", - "Central Aymara": "ayr_Latn", - "South Azerbaijani": "azb_Arab", - "North Azerbaijani": "azj_Latn", - "Bashkir": "bak_Cyrl", - "Bambara": "bam_Latn", - "Balinese": "ban_Latn", - "Belarusian": "bel_Cyrl", - "Bemba": "bem_Latn", - "Bengali": "ben_Beng", - "Bhojpuri": "bho_Deva", - "Banjar (Arabic script)": "bjn_Arab", - "Banjar (Latin script)": "bjn_Latn", - "Standard Tibetan": "bod_Tibt", - "Bosnian": "bos_Latn", - "Buginese": "bug_Latn", - "Bulgarian": "bul_Cyrl", - "Catalan": "cat_Latn", - "Cebuano": "ceb_Latn", - "Czech": "ces_Latn", - "Chokwe": "cjk_Latn", - "Central Kurdish": "ckb_Arab", - "Crimean Tatar": "crh_Latn", - "Welsh": "cym_Latn", - "Danish": "dan_Latn", - "German": "deu_Latn", - "Southwestern Dinka": "dik_Latn", - "Dyula": "dyu_Latn", - "Dzongkha": "dzo_Tibt", - "Greek": "ell_Grek", - "English": "eng_Latn", - "Esperanto": "epo_Latn", - "Estonian": "est_Latn", - "Basque": "eus_Latn", - "Ewe": "ewe_Latn", - "Faroese": "fao_Latn", - "Fijian": "fij_Latn", - "Finnish": "fin_Latn", - "Fon": "fon_Latn", - "French": "fra_Latn", - "Friulian": "fur_Latn", - "Nigerian Fulfulde": "fuv_Latn", - "Scottish Gaelic": "gla_Latn", - "Irish": "gle_Latn", - "Galician": "glg_Latn", - "Guarani": "grn_Latn", - "Gujarati": "guj_Gujr", - "Haitian Creole": "hat_Latn", - "Hausa": "hau_Latn", - "Hebrew": "heb_Hebr", - "Hindi": "hin_Deva", - "Chhattisgarhi": "hne_Deva", - "Croatian": "hrv_Latn", - "Hungarian": "hun_Latn", - "Armenian": "hye_Armn", - "Igbo": "ibo_Latn", - "Ilocano": "ilo_Latn", - "Indonesian": "ind_Latn", - "Icelandic": "isl_Latn", - "Italian": "ita_Latn", - "Javanese": "jav_Latn", - "Japanese": "jpn_Jpan", - "Kabyle": "kab_Latn", - "Jingpho": "kac_Latn", - "Kamba": "kam_Latn", - "Kannada": "kan_Knda", - "Kashmiri (Arabic script)": "kas_Arab", - "Kashmiri (Devanagari script)": "kas_Deva", - "Georgian": "kat_Geor", - "Central Kanuri (Arabic script)": "knc_Arab", - "Central Kanuri (Latin script)": "knc_Latn", - "Kazakh": "kaz_Cyrl", - "Kabiyè": "kbp_Latn", - "Kabuverdianu": "kea_Latn", - "Khmer": "khm_Khmr", - "Kikuyu": "kik_Latn", - "Kinyarwanda": "kin_Latn", - "Kyrgyz": "kir_Cyrl", - "Kimbundu": "kmb_Latn", - "Northern Kurdish": "kmr_Latn", - "Kikongo": "kon_Latn", - "Korean": "kor_Hang", - "Lao": "lao_Laoo", - "Ligurian": "lij_Latn", - "Limburgish": "lim_Latn", - "Lingala": "lin_Latn", - "Lithuanian": "lit_Latn", - "Lombard": "lmo_Latn", - "Latgalian": "ltg_Latn", - 
"Luxembourgish": "ltz_Latn", - "Luba-Kasai": "lua_Latn", - "Ganda": "lug_Latn", - "Luo": "luo_Latn", - "Mizo": "lus_Latn", - "Standard Latvian": "lvs_Latn", - "Magahi": "mag_Deva", - "Maithili": "mai_Deva", - "Malayalam": "mal_Mlym", - "Marathi": "mar_Deva", - "Minangkabau (Latin script)": "min_Latn", - "Macedonian": "mkd_Cyrl", - "Plateau Malagasy": "plt_Latn", - "Maltese": "mlt_Latn", - "Meitei (Bengali script)": "mni_Beng", - "Halh Mongolian": "khk_Cyrl", - "Mossi": "mos_Latn", - "Maori": "mri_Latn", - "Burmese": "mya_Mymr", - "Dutch": "nld_Latn", - "Norwegian Nynorsk": "nno_Latn", - "Norwegian Bokmål": "nob_Latn", - "Nepali": "npi_Deva", - "Northern Sotho": "nso_Latn", - "Nuer": "nus_Latn", - "Nyanja": "nya_Latn", - "Occitan": "oci_Latn", - "West Central Oromo": "gaz_Latn", - "Odia": "ory_Orya", - "Pangasinan": "pag_Latn", - "Eastern Panjabi": "pan_Guru", - "Papiamento": "pap_Latn", - "Western Persian": "pes_Arab", - "Polish": "pol_Latn", - "Portuguese": "por_Latn", - "Dari": "prs_Arab", - "Southern Pashto": "pbt_Arab", - "Ayacucho Quechua": "quy_Latn", - "Romanian": "ron_Latn", - "Rundi": "run_Latn", - "Russian": "rus_Cyrl", - "Sango": "sag_Latn", - "Sanskrit": "san_Deva", - "Sicilian": "scn_Latn", - "Shan": "shn_Mymr", - "Sinhala": "sin_Sinh", - "Slovak": "slk_Latn", - "Slovenian": "slv_Latn", - "Samoan": "smo_Latn", - "Shona": "sna_Latn", - "Sindhi": "snd_Arab", - "Somali": "som_Latn", - "Southern Sotho": "sot_Latn", - "Spanish": "spa_Latn", - "Tosk Albanian": "als_Latn", - "Sardinian": "srd_Latn", - "Serbian": "srp_Cyrl", - "Swati": "ssw_Latn", - "Sundanese": "sun_Latn", - "Swedish": "swe_Latn", - "Swahili": "swh_Latn", - "Silesian": "szl_Latn", - "Tamil": "tam_Taml", - "Tatar": "tat_Cyrl", - "Telugu": "tel_Telu", - "Tajik": "tgk_Cyrl", - "Tagalog": "tgl_Latn", - "Thai": "tha_Thai", - "Tigrinya": "tir_Ethi", - "Tamasheq (Latin script)": "taq_Latn", - "Tamasheq (Tifinagh script)": "taq_Tfng", - "Tok Pisin": "tpi_Latn", - "Tswana": "tsn_Latn", - "Tsonga": "tso_Latn", - "Turkmen": "tuk_Latn", - "Tumbuka": "tum_Latn", - "Turkish": "tur_Latn", - "Twi": "twi_Latn", - "Central Atlas Tamazight": "tzm_Tfng", - "Uyghur": "uig_Arab", - "Ukrainian": "ukr_Cyrl", - "Umbundu": "umb_Latn", - "Urdu": "urd_Arab", - "Northern Uzbek": "uzn_Latn", - "Venetian": "vec_Latn", - "Vietnamese": "vie_Latn", - "Waray": "war_Latn", - "Wolof": "wol_Latn", - "Xhosa": "xho_Latn", - "Eastern Yiddish": "ydd_Hebr", - "Yoruba": "yor_Latn", - "Yue Chinese": "yue_Hant", - "Chinese (Simplified)": "zho_Hans", - "Chinese (Traditional)": "zho_Hant", - "Standard Malay": "zsm_Latn", - "Zulu": "zul_Latn", -} diff --git a/spaces/vivien/semanticsearch/app.py b/spaces/vivien/semanticsearch/app.py deleted file mode 100644 index 6d33c35bd063a8aef2ddf0ed29a84efa3b9fc1de..0000000000000000000000000000000000000000 --- a/spaces/vivien/semanticsearch/app.py +++ /dev/null @@ -1,146 +0,0 @@ -import time -import re -import pandas as pd -import numpy as np -import torch -import torch.nn.functional as F -from transformers import AutoTokenizer, AutoModel -from tokenizers import Tokenizer, AddedToken -import streamlit as st -from st_click_detector import click_detector - -DEVICE = "cpu" -MODEL_OPTIONS = ["msmarco-distilbert-base-tas-b", "all-mpnet-base-v2"] -DESCRIPTION = """ -# Semantic search - -**Enter your query and hit enter** - -Built with 🤗 Hugging Face's [transformers](https://huggingface.co/transformers/) library, [SentenceBert](https://www.sbert.net/) models, [Streamlit](https://streamlit.io/) and 44k movie descriptions from the 
Kaggle [Movies Dataset](https://www.kaggle.com/rounakbanik/the-movies-dataset) -""" - - -@st.cache( - show_spinner=False, - hash_funcs={ - AutoModel: lambda _: None, - AutoTokenizer: lambda _: None, - dict: lambda _: None, - }, -) -def load(): - models, tokenizers, embeddings = [], [], [] - for model_option in MODEL_OPTIONS: - tokenizers.append( - AutoTokenizer.from_pretrained(f"sentence-transformers/{model_option}") - ) - models.append( - AutoModel.from_pretrained(f"sentence-transformers/{model_option}").to( - DEVICE - ) - ) - embeddings.append(np.load("embeddings.npy")) - embeddings.append(np.load("embeddings2.npy")) - df = pd.read_csv("movies.csv") - return tokenizers, models, embeddings, df - - -tokenizers, models, embeddings, df = load() - - -def pooling(model_output): - return model_output.last_hidden_state[:, 0] - - -def compute_embeddings(texts): - encoded_input = tokenizers[0]( - texts, padding=True, truncation=True, return_tensors="pt" - ).to(DEVICE) - - with torch.no_grad(): - model_output = models[0](**encoded_input, return_dict=True) - - embeddings = pooling(model_output) - - return embeddings.cpu().numpy() - - -def pooling2(model_output, attention_mask): - token_embeddings = model_output[0] - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - ) - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( - input_mask_expanded.sum(1), min=1e-9 - ) - - -def compute_embeddings2(list_of_strings): - encoded_input = tokenizers[1]( - list_of_strings, padding=True, truncation=True, return_tensors="pt" - ).to(DEVICE) - with torch.no_grad(): - model_output = models[1](**encoded_input) - sentence_embeddings = pooling2(model_output, encoded_input["attention_mask"]) - return F.normalize(sentence_embeddings, p=2, dim=1).cpu().numpy() - - -@st.cache( - show_spinner=False, - hash_funcs={Tokenizer: lambda _: None, AddedToken: lambda _: None}, -) -def semantic_search(query, model_id): - start = time.time() - if len(query.strip()) == 0: - return "" - if "[Similar:" not in query: - if model_id == 0: - query_embedding = compute_embeddings([query]) - else: - query_embedding = compute_embeddings2([query]) - else: - match = re.match(r"\[Similar:(\d{1,5}).*", query) - if match: - idx = int(match.groups()[0]) - query_embedding = embeddings[model_id][idx : idx + 1, :] - if query_embedding.shape[0] == 0: - return "" - else: - return "" - indices = np.argsort(embeddings[model_id] @ np.transpose(query_embedding)[:, 0])[ - -1:-11:-1 - ] - if len(indices) == 0: - return "" - result = "
                  " - for i in indices: - result += f"
                1. {df.iloc[i].title} ({df.iloc[i].release_date}). {df.iloc[i].overview} " - result += f"Similar movies
                2. " - delay = "%.3f" % (time.time() - start) - return f"

                  Computation time: {delay} seconds

                  {result}
                " - - -st.sidebar.markdown(DESCRIPTION) - -model_choice = st.sidebar.selectbox("Similarity model", options=MODEL_OPTIONS) -model_id = 0 if model_choice == MODEL_OPTIONS[0] else 1 - -if "query" in st.session_state: - query = st.text_input("", value=st.session_state["query"]) -else: - query = st.text_input("", value="time travel") - -clicked = click_detector(semantic_search(query, model_id)) - -if clicked != "": - change_query = False - if "last_clicked" not in st.session_state: - st.session_state["last_clicked"] = clicked - change_query = True - else: - if clicked != st.session_state["last_clicked"]: - st.session_state["last_clicked"] = clicked - change_query = True - if change_query: - st.session_state["query"] = f"[Similar:{clicked}] {df.iloc[int(clicked)].title}" - st.experimental_rerun() diff --git a/spaces/wallezen/so-vits-svc/inference/infer_tool.py b/spaces/wallezen/so-vits-svc/inference/infer_tool.py deleted file mode 100644 index 91561cfbfc61f3bf7334b10e8e7242574c5ed061..0000000000000000000000000000000000000000 --- a/spaces/wallezen/so-vits-svc/inference/infer_tool.py +++ /dev/null @@ -1,354 +0,0 @@ -import hashlib -import io -import json -import logging -import os -import time -from pathlib import Path -from inference import slicer -import gc - -import librosa -import numpy as np -# import onnxruntime -import parselmouth -import soundfile -import torch -import torchaudio - -import cluster -from hubert import hubert_model -import utils -from models import SynthesizerTrn - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.replace("\\", "/").split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - -def pad_array(arr, target_length): - current_length = arr.shape[0] - if current_length >= target_length: - return arr - else: - pad_width = 
target_length - current_length - pad_left = pad_width // 2 - pad_right = pad_width - pad_left - padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0)) - return padded_arr - -def split_list_by_n(list_collection, n, pre=0): - for i in range(0, len(list_collection), n): - yield list_collection[i-pre if i-pre>=0 else i: i + n] - - -class F0FilterException(Exception): - pass - -class Svc(object): - def __init__(self, net_g_path, config_path, - device=None, - cluster_model_path="logs/44k/kmeans_10000.pt", - nsf_hifigan_enhance = False - ): - self.net_g_path = net_g_path - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.net_g_ms = None - self.hps_ms = utils.get_hparams_from_file(config_path) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = self.hps_ms.data.hop_length - self.spk2id = self.hps_ms.spk - self.nsf_hifigan_enhance = nsf_hifigan_enhance - # load hubert - self.hubert_model = utils.get_hubert_model().to(self.dev) - self.load_model() - if os.path.exists(cluster_model_path): - self.cluster_model = cluster.get_cluster_model(cluster_model_path) - if self.nsf_hifigan_enhance: - from modules.enhancer import Enhancer - self.enhancer = Enhancer('nsf-hifigan', 'pretrain/nsf_hifigan/model',device=self.dev) - - def load_model(self): - # get model configuration - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - - - - def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker, f0_filter ,F0_mean_pooling,cr_threshold=0.05): - - wav, sr = librosa.load(in_path, sr=self.target_sample) - - if F0_mean_pooling == True: - f0, uv = utils.compute_f0_uv_torchcrepe(torch.FloatTensor(wav), sampling_rate=self.target_sample, hop_length=self.hop_size,device=self.dev,cr_threshold = cr_threshold) - if f0_filter and sum(f0) == 0: - raise F0FilterException("No voice detected") - f0 = torch.FloatTensor(list(f0)) - uv = torch.FloatTensor(list(uv)) - if F0_mean_pooling == False: - f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size) - if f0_filter and sum(f0) == 0: - raise F0FilterException("No voice detected") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - f0 = f0 * 2 ** (tran / 12) - f0 = f0.unsqueeze(0).to(self.dev) - uv = uv.unsqueeze(0).to(self.dev) - - wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(self.dev) - c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k) - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1]) - - if cluster_infer_ratio !=0: - cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T - cluster_c = torch.FloatTensor(cluster_c).to(self.dev) - c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c - - c = c.unsqueeze(0) - return c, f0, uv - - def infer(self, speaker, tran, raw_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4, - f0_filter=False, - F0_mean_pooling=False, - enhancer_adaptive_key = 0, - cr_threshold = 0.05 - ): - - speaker_id = 
self.spk2id.__dict__.get(speaker) - if not speaker_id and type(speaker) is int: - if len(self.spk2id.__dict__) >= speaker: - speaker_id = speaker - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker, f0_filter,F0_mean_pooling,cr_threshold=cr_threshold) - if "half" in self.net_g_path and torch.cuda.is_available(): - c = c.half() - with torch.no_grad(): - start = time.time() - audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0,0].data.float() - if self.nsf_hifigan_enhance: - audio, _ = self.enhancer.enhance( - audio[None,:], - self.target_sample, - f0[:,:,None], - self.hps_ms.data.hop_length, - adaptive_key = enhancer_adaptive_key) - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1] - - def clear_empty(self): - # clean up vram - torch.cuda.empty_cache() - - def unload_model(self): - # unload model - self.net_g_ms = self.net_g_ms.to("cpu") - del self.net_g_ms - if hasattr(self,"enhancer"): - self.enhancer.enhancer = self.enhancer.enhancer.to("cpu") - del self.enhancer.enhancer - del self.enhancer - gc.collect() - - def slice_inference(self, - raw_audio_path, - spk, - tran, - slice_db, - cluster_infer_ratio, - auto_predict_f0, - noice_scale, - pad_seconds=0.5, - clip_seconds=0, - lg_num=0, - lgr_num =0.75, - F0_mean_pooling = False, - enhancer_adaptive_key = 0, - cr_threshold = 0.05 - ): - wav_path = raw_audio_path - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip_seconds*audio_sr) - lg_size = int(lg_num*audio_sr) - lg_size_r = int(lg_size*lgr_num) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - # padd - length = int(np.ceil(len(data) / audio_sr * self.target_sample)) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - audio.extend(list(pad_array(_audio, length))) - continue - if per_size != 0: - datas = split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds!=0 else length - if clip_seconds!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, dat, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = self.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - F0_mean_pooling = F0_mean_pooling, - enhancer_adaptive_key = enhancer_adaptive_key, - cr_threshold = cr_threshold - ) - _audio = out_audio.cpu().numpy() - pad_len = int(self.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - _audio = pad_array(_audio, per_length) - if lg_size!=0 and k!=0: - lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:] - lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr_num != 1 else _audio[0:lg_size] - lg_pre = lg1*(1-lg)+lg2*lg - audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size] - 
audio.extend(lg_pre) - _audio = _audio[lg_size_c_l+lg_size_r:] if lgr_num != 1 else _audio[lg_size:] - audio.extend(list(_audio)) - return np.array(audio) - -class RealTimeVC: - def __init__(self): - self.last_chunk = None - self.last_o = None - self.chunk_len = 16000 # chunk length - self.pre_len = 3840 # cross fade length, multiples of 640 - - # Input and output are 1-dimensional numpy waveform arrays - - def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4, - f0_filter=False): - - import maad - audio, sr = torchaudio.load(input_wav_path) - audio = audio.cpu().numpy()[0] - temp_wav = io.BytesIO() - if self.last_chunk is None: - input_wav_path.seek(0) - - audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - f0_filter=f0_filter) - - audio = audio.cpu().numpy() - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return audio[-self.chunk_len:] - else: - audio = np.concatenate([self.last_chunk, audio]) - soundfile.write(temp_wav, audio, sr, format="wav") - temp_wav.seek(0) - - audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - f0_filter=f0_filter) - - audio = audio.cpu().numpy() - ret = maad.util.crossfade(self.last_o, audio, self.pre_len) - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return ret[self.chunk_len:2 * self.chunk_len] diff --git a/spaces/wanghaha13/ChuanhuChatGPT/chat_func.py b/spaces/wanghaha13/ChuanhuChatGPT/chat_func.py deleted file mode 100644 index 374178f3d22c5c23d1dc2952336cdc298a77315d..0000000000000000000000000000000000000000 --- a/spaces/wanghaha13/ChuanhuChatGPT/chat_func.py +++ /dev/null @@ -1,456 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp - -from presets import * -from llama_func import * -from utils import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." 
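`get_response` below serializes the system prompt and the running history into the chat-completions message format before posting it. The helpers it uses (`construct_system`, `construct_user`, `construct_assistant`) come from the repo's `utils` module; the sketch below shows plausible stand-ins for them, inferred from how the surrounding code indexes `history[-1]["content"]`:

```python
# Plausible stand-ins for the helpers imported from utils: each turn is a
# {"role", "content"} dict in the OpenAI chat-completions message format.
def construct_system(content: str) -> dict:
    return {"role": "system", "content": content}

def construct_user(content: str) -> dict:
    return {"role": "user", "content": content}

def construct_assistant(content: str) -> dict:
    return {"role": "assistant", "content": content}

history = [construct_system("You are a helpful assistant."),
           construct_user("Hello!")]
print(history)
```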
-API_URL = "https://api.openai.com/v1/chat/completions"
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-def get_response(
-    openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model
-):
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {openai_api_key}",
-    }
-
-    history = [construct_system(system_prompt), *history]
-
-    payload = {
-        "model": selected_model,
-        "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
-        "temperature": temperature,  # 1.0,
-        "top_p": top_p,  # 1.0,
-        "n": 1,
-        "stream": stream,
-        "presence_penalty": 0,
-        "frequency_penalty": 0,
-    }
-    if stream:
-        timeout = timeout_streaming
-    else:
-        timeout = timeout_all
-
-    # Read proxy settings from the environment variables
-    http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy")
-    https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy")
-
-    # If proxy settings exist, use them
-    proxies = {}
-    if http_proxy:
-        logging.info(f"Using HTTP proxy: {http_proxy}")
-        proxies["http"] = http_proxy
-    if https_proxy:
-        logging.info(f"Using HTTPS proxy: {https_proxy}")
-        proxies["https"] = https_proxy
-
-    # Send the request through the proxy if one is configured, otherwise with default settings
-    if proxies:
-        response = requests.post(
-            API_URL,
-            headers=headers,
-            json=payload,
-            stream=True,
-            timeout=timeout,
-            proxies=proxies,
-        )
-    else:
-        response = requests.post(
-            API_URL,
-            headers=headers,
-            json=payload,
-            stream=True,
-            timeout=timeout,
-        )
-    return response
-
-
-def stream_predict(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    selected_model,
-    fake_input=None,
-    display_append=""
-):
-    def get_return_value():
-        return chatbot, history, status_text, all_token_counts
-
-    logging.info("Streaming answer mode")
-    partial_words = ""
-    counter = 0
-    status_text = "Starting to stream the answer..."
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    if fake_input:
-        chatbot.append((fake_input, ""))
-    else:
-        chatbot.append((inputs, ""))
-    user_token_count = 0
-    if len(all_token_counts) == 0:
-        system_prompt_token_count = count_token(construct_system(system_prompt))
-        user_token_count = (
-            count_token(construct_user(inputs)) + system_prompt_token_count
-        )
-    else:
-        user_token_count = count_token(construct_user(inputs))
-    all_token_counts.append(user_token_count)
-    logging.info(f"Input token count: {user_token_count}")
-    yield get_return_value()
-    try:
-        response = get_response(
-            openai_api_key,
-            system_prompt,
-            history,
-            temperature,
-            top_p,
-            True,
-            selected_model,
-        )
-    except requests.exceptions.ConnectTimeout:
-        status_text = (
-            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        )
-        yield get_return_value()
-        return
-    except requests.exceptions.ReadTimeout:
-        status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
-        yield get_return_value()
-        return
-
-    yield get_return_value()
-    error_json_str = ""
-
-    for chunk in tqdm(response.iter_lines()):
-        if counter == 0:
-            counter += 1
-            continue
-        counter += 1
-        # check whether each line is non-empty
-        if chunk:
-            chunk = chunk.decode()
-            chunklength = len(chunk)
-            try:
-                chunk = json.loads(chunk[6:])
-            except json.JSONDecodeError:
-                logging.info(chunk)
-                error_json_str += chunk
-                status_text = f"JSON parsing error. Please reset the conversation. Received: {error_json_str}"
-                yield get_return_value()
-                continue
-            # decode each line as response data is in bytes
-            if chunklength > 6 and "delta" in chunk["choices"][0]:
-                finish_reason = chunk["choices"][0]["finish_reason"]
-                status_text = construct_token_message(
-                    sum(all_token_counts), stream=True
-                )
-                if finish_reason == "stop":
-                    yield get_return_value()
-                    break
-                try:
-                    partial_words = (
-                        partial_words + chunk["choices"][0]["delta"]["content"]
-                    )
-                except KeyError:
-                    status_text = (
-                        standard_error_msg
-                        + "No content found in the API reply. The token limit was most likely reached. Please reset the conversation. Current token count: "
-                        + str(sum(all_token_counts))
-                    )
-                    yield get_return_value()
-                    break
-                history[-1] = construct_assistant(partial_words)
-                chatbot[-1] = (chatbot[-1][0], partial_words+display_append)
-                all_token_counts[-1] += 1
-                yield get_return_value()
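The loop in `stream_predict` consumes OpenAI's server-sent events line by line, stripping the `data: ` prefix (`chunk[6:]`) before JSON-decoding each delta and appending its `content`. A minimal, standalone sketch of the same parsing pattern (the `lines` fixture is fabricated for illustration):

```python
import json

# Fabricated sample of what response.iter_lines() yields for a streamed completion.
lines = [
    b'data: {"choices": [{"delta": {"content": "Hel"}, "finish_reason": null}]}',
    b'data: {"choices": [{"delta": {"content": "lo"}, "finish_reason": null}]}',
    b'data: [DONE]',
]

partial = ""
for raw in lines:
    if not raw:
        continue  # keep-alive blank lines separate events
    text = raw.decode()[6:]  # drop the leading "data: "
    if text == "[DONE]":
        break
    delta = json.loads(text)["choices"][0]["delta"]
    partial += delta.get("content", "")
print(partial)  # -> "Hello"
```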
-
-
-def predict_all(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    selected_model,
-    fake_input=None,
-    display_append=""
-):
-    logging.info("Single-shot answer mode")
-    history.append(construct_user(inputs))
-    history.append(construct_assistant(""))
-    if fake_input:
-        chatbot.append((fake_input, ""))
-    else:
-        chatbot.append((inputs, ""))
-    all_token_counts.append(count_token(construct_user(inputs)))
-    try:
-        response = get_response(
-            openai_api_key,
-            system_prompt,
-            history,
-            temperature,
-            top_p,
-            False,
-            selected_model,
-        )
-    except requests.exceptions.ConnectTimeout:
-        status_text = (
-            standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-        )
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.ProxyError:
-        status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    except requests.exceptions.SSLError:
-        status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
-        return chatbot, history, status_text, all_token_counts
-    response = json.loads(response.text)
-    content = response["choices"][0]["message"]["content"]
-    history[-1] = construct_assistant(content)
-    chatbot[-1] = (chatbot[-1][0], content+display_append)
-    total_token_count = response["usage"]["total_tokens"]
-    all_token_counts[-1] = total_token_count - sum(all_token_counts)
-    status_text = construct_token_message(total_token_count)
-    return chatbot, history, status_text, all_token_counts
-
-
-def predict(
-    openai_api_key,
-    system_prompt,
-    history,
-    inputs,
-    chatbot,
-    all_token_counts,
-    top_p,
-    temperature,
-    stream=False,
-    selected_model=MODELS[0],
-    use_websearch=False,
-    files = None,
-    should_check_token_count=True,
-):  # repetition_penalty, top_k
-    logging.info("Input: " + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
-    if files:
-        msg = "Building the index... (this may take quite a while)"
-        logging.info(msg)
-        yield chatbot, history, msg, all_token_counts
-        index = construct_index(openai_api_key, file_src=files)
-        msg = "Index built, fetching the answer..."
-        yield chatbot, history, msg, all_token_counts
-        history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot)
-        yield chatbot, history, status_text, all_token_counts
-        return
-
-    old_inputs = ""
-    link_references = []
-    if use_websearch:
-        search_results = ddg(inputs, max_results=5)
-        old_inputs = inputs
-        web_results = []
-        for idx, result in enumerate(search_results):
-            logging.info(f"Search result {idx + 1}: {result}")
-            domain_name = urllib3.util.parse_url(result["href"]).host
-            web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
-            link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n")
-        link_references = "\n\n" + "".join(link_references)
-        inputs = (
-            replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
-            .replace("{query}", inputs)
-            .replace("{web_results}", "\n\n".join(web_results))
-        )
-    else:
-        link_references = ""
-
-    if len(openai_api_key) != 51:
-        status_text = standard_error_msg + no_apikey_msg
-        logging.info(status_text)
-        chatbot.append((inputs, ""))
-        if len(history) == 0:
-            history.append(construct_user(inputs))
-            history.append("")
-            all_token_counts.append(0)
-        else:
-            history[-2] = construct_user(inputs)
-        yield chatbot, history, status_text, all_token_counts
-        return
-
-    yield chatbot, history, "Starting to generate the answer...", all_token_counts
-
-    if stream:
-        logging.info("Using streaming")
-        iter = stream_predict(
-            openai_api_key,
-            system_prompt,
-            history,
-            inputs,
-            chatbot,
-            all_token_counts,
-            top_p,
-            temperature,
-            selected_model,
-            fake_input=old_inputs,
-            display_append=link_references
-        )
-        for chatbot, history, status_text, all_token_counts in iter:
-            yield chatbot, history, status_text, all_token_counts
-    else:
-        logging.info("Not using streaming")
-        chatbot, history, status_text, all_token_counts = predict_all(
-            openai_api_key,
-            system_prompt,
-            history,
-            inputs,
-            chatbot,
-            all_token_counts,
-            top_p,
-            temperature,
-            selected_model,
-            fake_input=old_inputs,
-            display_append=link_references
-        )
-        yield chatbot, history, status_text, all_token_counts
-
-    logging.info(f"Transfer finished. Current token count: {all_token_counts}")
-    if len(history) > 1 and history[-1]["content"] != inputs:
-        logging.info(
-            "Answer: "
-            + colorama.Fore.BLUE
-            + f"{history[-1]['content']}"
-            + colorama.Style.RESET_ALL
-        )
-
-    if stream:
-        max_token = max_token_streaming
-    else:
-        max_token = max_token_all
-
-    if sum(all_token_counts) > max_token and should_check_token_count:
-        status_text = f"Reducing tokens: {all_token_counts}/{max_token}"
-        logging.info(status_text)
-        yield chatbot, history, status_text, all_token_counts
-        iter = reduce_token_size(
-            openai_api_key,
-            system_prompt,
-            history,
-            chatbot,
-            all_token_counts,
-            top_p,
-            temperature,
-            max_token//2,
-            selected_model=selected_model,
-        )
-        for chatbot, history, status_text, all_token_counts in iter:
-            status_text = f"Token limit reached; token count automatically reduced to {status_text}"
-            yield chatbot, history, status_text, all_token_counts
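The websearch branch of `predict` numbers each DuckDuckGo hit, keeps a markdown reference list for display, and splices the result bodies into a prompt template. A self-contained sketch of that formatting step (the `hits` data and `template` string are made up for illustration):

```python
# Hypothetical search hits standing in for ddg(inputs, max_results=5) output.
hits = [
    {"href": "https://example.com/a", "body": "First snippet."},
    {"href": "https://example.org/b", "body": "Second snippet."},
]

template = "Answer using the results below.\n{web_results}\n\nQuestion: {query}"

web_results, link_references = [], []
for idx, hit in enumerate(hits):
    web_results.append(f'[{idx+1}]"{hit["body"]}"\nURL: {hit["href"]}')
    # Display-only markdown list appended under the assistant's reply.
    link_references.append(f"{idx+1}. [{hit['href']}]({hit['href']})\n")

prompt = template.replace("{query}", "time travel").replace(
    "{web_results}", "\n\n".join(web_results)
)
print(prompt)
print("\n\n" + "".join(link_references))
```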
-
-
-def retry(
-    openai_api_key,
-    system_prompt,
-    history,
-    chatbot,
-    token_count,
-    top_p,
-    temperature,
-    stream=False,
-    selected_model=MODELS[0],
-):
-    logging.info("Retrying...")
-    if len(history) == 0:
-        yield chatbot, history, f"{standard_error_msg}The context is empty", token_count
-        return
-    history.pop()
-    inputs = history.pop()["content"]
-    token_count.pop()
-    iter = predict(
-        openai_api_key,
-        system_prompt,
-        history,
-        inputs,
-        chatbot,
-        token_count,
-        top_p,
-        temperature,
-        stream=stream,
-        selected_model=selected_model,
-    )
-    logging.info("Retrying...")
-    for x in iter:
-        yield x
-    logging.info("Retry finished")
-
-
-def reduce_token_size(
-    openai_api_key,
-    system_prompt,
-    history,
-    chatbot,
-    token_count,
-    top_p,
-    temperature,
-    max_token_count,
-    selected_model=MODELS[0],
-):
-    logging.info("Starting to reduce the token count...")
-    iter = predict(
-        openai_api_key,
-        system_prompt,
-        history,
-        summarize_prompt,
-        chatbot,
-        token_count,
-        top_p,
-        temperature,
-        selected_model=selected_model,
-        should_check_token_count=False,
-    )
-    logging.info(f"chatbot: {chatbot}")
-    flag = False
-    for chatbot, history, status_text, previous_token_count in iter:
-        num_chat = find_n(previous_token_count, max_token_count)
-        if flag:
-            chatbot = chatbot[:-1]
-        flag = True
-        history = history[-2*num_chat:] if num_chat > 0 else []
-        token_count = previous_token_count[-num_chat:] if num_chat > 0 else []
-        msg = f"Kept the last {num_chat} rounds of conversation"
-        yield chatbot, history, msg + "," + construct_token_message(
-            sum(token_count) if len(token_count) > 0 else 0,
-        ), token_count
-        logging.info(msg)
-    logging.info("Finished reducing the token count")
\ No newline at end of file
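`reduce_token_size` asks the model to summarize the conversation, then keeps only as many recent rounds as fit the reduced budget. The helper `find_n` comes from the repo's `utils` module; the sketch below is a plausible stand-in for the trimming rule it implements:

```python
def find_n(token_counts, max_token_count):
    # Walk backwards until adding one more round would exceed the budget.
    total, n = 0, 0
    for count in reversed(token_counts):
        if total + count > max_token_count:
            break
        total += count
        n += 1
    return n

token_counts = [120, 80, 300, 90, 60]
n = find_n(token_counts, 200)       # -> 2 (only the last two rounds fit)
kept = token_counts[-n:]            # keep the most recent n rounds
print(n, kept)                      # 2 [90, 60]
```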
diff --git a/spaces/wanghuoto/gogoai/src/lib/isomorphic/node.ts b/spaces/wanghuoto/gogoai/src/lib/isomorphic/node.ts deleted file mode 100644 index da213ad6a86181979f098309c374da02835db5a0..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/lib/isomorphic/node.ts +++ /dev/null @@ -1,26 +0,0 @@
-import Debug from 'debug'
-
-const { fetch, setGlobalDispatcher, ProxyAgent } = require('undici')
-const { HttpsProxyAgent } = require('https-proxy-agent')
-const ws = require('ws')
-
-const debug = Debug('bingo')
-
-const httpProxy = process.env.http_proxy || process.env.HTTP_PROXY || process.env.https_proxy || process.env.HTTPS_PROXY;
-let WebSocket = ws.WebSocket
-
-if (httpProxy) {
-  setGlobalDispatcher(new ProxyAgent(httpProxy))
-  const agent = new HttpsProxyAgent(httpProxy)
-  // @ts-ignore
-  WebSocket = class extends ws.WebSocket {
-    constructor(address: string | URL, options: typeof ws.WebSocket) {
-      super(address, {
-        ...options,
-        agent,
-      })
-    }
-  }
-}
-
-export default { fetch, WebSocket, debug }
diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/search_engine_serper.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/search_engine_serper.py deleted file mode 100644 index 0eec2694bf1aee218ab0e6138664c8edf8d8f1e2..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/search_engine_serper.py +++ /dev/null @@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time    : 2023/5/23 18:27
-@Author  : alexanderwu
-@File    : search_engine_serper.py
-"""
-import json
-from typing import Any, Dict, Optional, Tuple
-
-import aiohttp
-from pydantic import BaseModel, Field, validator
-
-from metagpt.config import CONFIG
-
-
-class SerperWrapper(BaseModel):
-    search_engine: Any  #: :meta private:
-    payload: dict = Field(default={"page": 1, "num": 10})
-    serper_api_key: Optional[str] = None
-    aiosession: Optional[aiohttp.ClientSession] = None
-
-    class Config:
-        arbitrary_types_allowed = True
-
-    @validator("serper_api_key", always=True)
-    @classmethod
-    def check_serper_api_key(cls, val: str):
-        val = val or CONFIG.serper_api_key
-        if not val:
-            raise ValueError(
-                "To use, make sure you provide the serper_api_key when constructing an object. Alternatively, "
-                "ensure that the environment variable SERPER_API_KEY is set with your API key. You can obtain "
-                "an API key from https://serper.dev/."
- ) - return val - - async def run(self, query: str, max_results: int = 8, as_string: bool = True, **kwargs: Any) -> str: - """Run query through Serper and parse result async.""" - if isinstance(query, str): - return self._process_response((await self.results([query], max_results))[0], as_string=as_string) - else: - results = [self._process_response(res, as_string) for res in await self.results(query, max_results)] - return "\n".join(results) if as_string else results - - async def results(self, queries: list[str], max_results: int = 8) -> dict: - """Use aiohttp to run query through Serper and return the results async.""" - - def construct_url_and_payload_and_headers() -> Tuple[str, Dict[str, str]]: - payloads = self.get_payloads(queries, max_results) - url = "https://google.serper.dev/search" - headers = self.get_headers() - return url, payloads, headers - - url, payloads, headers = construct_url_and_payload_and_headers() - if not self.aiosession: - async with aiohttp.ClientSession() as session: - async with session.post(url, data=payloads, headers=headers) as response: - res = await response.json() - else: - async with self.aiosession.get.post(url, data=payloads, headers=headers) as response: - res = await response.json() - - return res - - def get_payloads(self, queries: list[str], max_results: int) -> Dict[str, str]: - """Get payloads for Serper.""" - payloads = [] - for query in queries: - _payload = { - "q": query, - "num": max_results, - } - payloads.append({**self.payload, **_payload}) - return json.dumps(payloads, sort_keys=True) - - def get_headers(self) -> Dict[str, str]: - headers = {"X-API-KEY": self.serper_api_key, "Content-Type": "application/json"} - return headers - - @staticmethod - def _process_response(res: dict, as_string: bool = False) -> str: - """Process response from SerpAPI.""" - # logger.debug(res) - focus = ["title", "snippet", "link"] - - def get_focused(x): - return {i: j for i, j in x.items() if i in focus} - - if "error" in res.keys(): - raise ValueError(f"Got error from SerpAPI: {res['error']}") - if "answer_box" in res.keys() and "answer" in res["answer_box"].keys(): - toret = res["answer_box"]["answer"] - elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys(): - toret = res["answer_box"]["snippet"] - elif "answer_box" in res.keys() and "snippet_highlighted_words" in res["answer_box"].keys(): - toret = res["answer_box"]["snippet_highlighted_words"][0] - elif "sports_results" in res.keys() and "game_spotlight" in res["sports_results"].keys(): - toret = res["sports_results"]["game_spotlight"] - elif "knowledge_graph" in res.keys() and "description" in res["knowledge_graph"].keys(): - toret = res["knowledge_graph"]["description"] - elif "snippet" in res["organic"][0].keys(): - toret = res["organic"][0]["snippet"] - else: - toret = "No good search result found" - - toret_l = [] - if "answer_box" in res.keys() and "snippet" in res["answer_box"].keys(): - toret_l += [get_focused(res["answer_box"])] - if res.get("organic"): - toret_l += [get_focused(i) for i in res.get("organic")] - - return str(toret) + "\n" + str(toret_l) if as_string else toret_l - - -if __name__ == "__main__": - import fire - - fire.Fire(SerperWrapper().run) diff --git a/spaces/wilson1/bingo/src/app/layout.tsx b/spaces/wilson1/bingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/wilson1/bingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' 
-import { Toaster } from 'react-hot-toast'
-import { TailwindIndicator } from '@/components/tailwind-indicator'
-import { Providers } from '@/components/providers'
-import { Header } from '@/components/header'
-
-import '@/app/globals.scss'
-
-
-export const metadata: Metadata = {
-  title: {
-    default: 'Bing AI Chatbot',
-    template: `%s - Bing AI Chatbot`
-  },
-  description: 'Bing AI Chatbot Web App.',
-  themeColor: [
-    { media: '(prefers-color-scheme: light)', color: 'white' },
-    { media: '(prefers-color-scheme: dark)', color: 'dark' }
-  ],
-  icons: {
-    icon: '/favicon.ico',
-    shortcut: '../assets/images/logo.svg',
-    apple: '../assets/images/logo.svg'
-  }
-}
-
-interface RootLayoutProps {
-  children: React.ReactNode
-}
-
-export default function RootLayout({ children }: RootLayoutProps) {
-  return (
-    <html>
-      <body>
-        <Toaster />
-        <Providers>
-          {/* @ts-ignore */}
-          <Header />
-          <main>
-            {children}
-          </main>
-          <TailwindIndicator />
-        </Providers>
-      </body>
-    </html>
-  )
-}
diff --git a/spaces/wuhuik/bingo/src/components/header.tsx b/spaces/wuhuik/bingo/src/components/header.tsx deleted file mode 100644 index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/header.tsx +++ /dev/null @@ -1,12 +0,0 @@
-import * as React from 'react'
-import { UserMenu } from './user-menu'
-
-export async function Header() {
-  return (
-    <div>
-      <div>
-        <UserMenu />
-      </div>
-    </div>
-  )
-}
diff --git a/spaces/wydgg/bingo-wyd-ai/src/components/chat-list.tsx b/spaces/wydgg/bingo-wyd-ai/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@
-import React from 'react'
-
-import { Separator } from '@/components/ui/separator'
-import { ChatMessage } from '@/components/chat-message'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-
-export interface ChatList {
-  messages: ChatMessageModel[]
-}
-
-export function ChatList({ messages }: ChatList) {
-  if (!messages.length) {
-    return null
-  }
-
-  return (
-    <div>
-      {messages.map((message, index) => (
-        <React.Fragment key={index}>
-          <ChatMessage message={message} />
-          {index < messages.length - 1 && (
-            <Separator />
-          )}
-        </React.Fragment>
-      ))}
-    </div>
                - ) -} diff --git a/spaces/wydgg/bingo-wyd-ai/src/lib/storage.ts b/spaces/wydgg/bingo-wyd-ai/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/wydgg/bingo-wyd-ai/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/xdecoder/Demo/xdecoder/language/vlpencoder.py b/spaces/xdecoder/Demo/xdecoder/language/vlpencoder.py deleted file mode 100644 index ce6fd4709255e8869749d7401babb373b187d697..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/xdecoder/language/vlpencoder.py +++ /dev/null @@ -1,168 +0,0 @@ - -import torch -from torch import nn -from torch.nn import functional as F - -from timm.models.layers import trunc_normal_ - -from .registry import register_model -from ..utils import configurable -from .LangEncoder import build_tokenizer, build_lang_encoder -from utils.misc import prompt_engineering, get_prompt_templates - - -class LanguageEncoder(nn.Module): - - @configurable - def __init__( - self, - tokenizer, - tokenizer_type, - lang_encoder, - lang_projection, - max_token_num, - ): - super().__init__() - self.tokenizer = tokenizer - self.tokenizer_type = tokenizer_type - self.lang_encoder = lang_encoder - self.lang_proj = lang_projection - self.max_token_num = max_token_num - self.logit_scale = nn.Parameter(torch.ones([])) - - @classmethod - def from_config(cls, cfg): - tokenizer = build_tokenizer(cfg['MODEL']['TEXT']) - tokenizer_type = cfg['MODEL']['TEXT']['TOKENIZER'] - lang_encoder = build_lang_encoder(cfg['MODEL']['TEXT'], tokenizer, cfg['VERBOSE']) - max_token_num = cfg['MODEL']['TEXT']['CONTEXT_LENGTH'] - - dim_lang = cfg['MODEL']['TEXT']['WIDTH'] - dim_projection = cfg['MODEL']['DIM_PROJ'] - lang_projection = nn.Parameter(torch.empty(dim_lang, dim_projection)) - trunc_normal_(lang_projection, std=.02) - - return { - "tokenizer": tokenizer, - "tokenizer_type": tokenizer_type, - "lang_encoder": lang_encoder, - "lang_projection": lang_projection, - "max_token_num": max_token_num, - } - - def get_text_embeddings(self, class_names, name='default', is_eval=False, add_bgd=False, prompt=True, norm=True): - if not is_eval: - if prompt: - # randomly sample one template - arbitary_concepts = [ - prompt_engineering(class_names[label].replace('-other','').replace('-merged','').replace('-stuff',''), topk=10000, suffix='.') \ - for label in range(len(class_names)) - ] - if add_bgd: - arbitary_concepts.append("A background in coco.") - else: - arbitary_concepts = class_names - - input_ids = [] - attention_masks = [] - for txt in arbitary_concepts: - tokens = self.tokenizer( - txt, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt' - ) - tokens['input_ids'].squeeze_() - tokens['attention_mask'].squeeze_() - - input_ids.append(tokens['input_ids']) - attention_masks.append(tokens['attention_mask']) - - arbitary_tokens = torch.stack(input_ids) - 
arbitary_attention_masks = torch.stack(attention_masks) - - text_emb = self.forward_language((arbitary_tokens.cuda(), arbitary_attention_masks.cuda()), norm=norm) - setattr(self, '{}_text_embeddings'.format(name), text_emb) - else: - with torch.no_grad(): - def extract_mean_emb(txts): - tokens = self.tokenizer( - txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt' - ) - clss_embedding = self.forward_language((tokens['input_ids'].cuda(), tokens['attention_mask'].cuda()), norm=norm) - clss_embedding = clss_embedding.mean(dim=0) - clss_embedding /= clss_embedding.norm() - return clss_embedding - - templates = get_prompt_templates() - clss_embeddings = [] - if prompt: - for clss in class_names: - txts = [template.format(clss.replace('-other','').replace('-merged','').replace('-stuff','')) for template in templates] - clss_embeddings.append(extract_mean_emb(txts)) - else: - clss_embeddings.append(extract_mean_emb(class_names)) - - if add_bgd: - txts = ["A background in coco."] - clss_embeddings.append(extract_mean_emb(txts)) - - text_emb = torch.stack(clss_embeddings, dim=0) - setattr(self, '{}_text_embeddings'.format(name), text_emb) - - def get_text_token_embeddings(self, txts, name='default', token=False, norm=False): - if not token: - tokens = self.tokenizer( - txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt' - ) - tokens = {key: value.cuda() for key, value in tokens.items()} - else: - tokens = txts - token_emb, class_emb = self.forward_language_token((tokens['input_ids'], tokens['attention_mask']), norm=norm) - ret = {"tokens": tokens, - "token_emb": token_emb, - "class_emb": class_emb,} - setattr(self, '{}_token_embeddings'.format(name), ret) - return ret - - def forward_language(self, texts, norm=True): - x = self.lang_encoder(*texts) - x = x['last_hidden_state'] - - if self.tokenizer_type == 'clip': - x = x[torch.arange(x.size(0)), texts[0].argmax(dim=-1)] - else: - x = x[:, 0] - - x = x @ self.lang_proj - if norm: - x = x / (x.norm(dim=-1, keepdim=True) + 1e-7) - return x - - def forward_language_token(self, texts, norm=False): - x = self.lang_encoder(*texts) - token_x = x['last_hidden_state'] - - if self.tokenizer_type == 'clip': - class_x = token_x[torch.arange(token_x.size(0)), texts[0].argmax(dim=-1)] - else: - class_x = token_x[:, 0] - - class_x = class_x @ self.lang_proj - token_x = token_x @ self.lang_proj - - if norm: - class_x = class_x / (class_x.norm(dim=-1, keepdim=True) + 1e-7) - token_x = token_x / (token_x.norm(dim=-1, keepdim=True) + 1e-7) - - return token_x, class_x - - def compute_similarity(self, v_emb, name='default', fake=False): - if fake: - return None - v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7) - t_emb = getattr(self, '{}_text_embeddings'.format(name)) - output = self.logit_scale.exp() * v_emb @ t_emb.unsqueeze(0).transpose(1, 2) - return output - - -@register_model -def get_language_model(cfg, **kwargs): - return LanguageEncoder(cfg) \ No newline at end of file diff --git a/spaces/xu1998hz/sescore/README.md b/spaces/xu1998hz/sescore/README.md deleted file mode 100644 index 83baa7e86b786836d80c22f3276151cc951fef03..0000000000000000000000000000000000000000 --- a/spaces/xu1998hz/sescore/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: SEScore -datasets: -- -tags: -- evaluate -- metric -description: "SEScore: a text generation evaluation metric" -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false ---- - -# Metric Card for SEScore -![alt 
text](https://huggingface.co/spaces/xu1998hz/sescore/blob/main/img/logo_sescore.png) - -## Metric Description -*SEScore is an unsupervised learned evaluation metric trained on synthesized dataset* - -## How to Use - -*Provide simplest possible example for using the metric* - -### Inputs -*SEScore takes input of predictions (a list of candidate translations) and references (a list of reference translations).* - -### Output Values - -*Output value is between 0 to -25* - -#### Values from Popular Papers - - -### Examples -*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.* - -## Limitations and Bias -*Note any known limitations or biases that the metric has, with links and references if possible.* - -## Citation -*Cite the source where this metric was introduced.* - -## Further References -*Add any useful further references.* diff --git a/spaces/ybelkada/interfacegan_pp/torch_utils/ops/bias_act.cpp b/spaces/ybelkada/interfacegan_pp/torch_utils/ops/bias_act.cpp deleted file mode 100644 index 3adaeee2ae44e96655d354c2bdfb81de8ebfe6c6..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/torch_utils/ops/bias_act.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "bias_act.h" - -//------------------------------------------------------------------------ - -static bool has_same_layout(torch::Tensor x, torch::Tensor y) -{ - if (x.dim() != y.dim()) - return false; - for (int64_t i = 0; i < x.dim(); i++) - { - if (x.size(i) != y.size(i)) - return false; - if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) - return false; - } - return true; -} - -//------------------------------------------------------------------------ - -static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) -{ - // Validate arguments. 
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); - TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); - TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); - TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(b.dim() == 1, "b must have rank 1"); - TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); - TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); - TORCH_CHECK(grad >= 0, "grad must be non-negative"); - - // Validate layout. - TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); - TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); - TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); - TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); - TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - torch::Tensor y = torch::empty_like(x); - TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); - - // Initialize CUDA kernel parameters. - bias_act_kernel_params p; - p.x = x.data_ptr(); - p.b = (b.numel()) ? b.data_ptr() : NULL; - p.xref = (xref.numel()) ? xref.data_ptr() : NULL; - p.yref = (yref.numel()) ? yref.data_ptr() : NULL; - p.dy = (dy.numel()) ? dy.data_ptr() : NULL; - p.y = y.data_ptr(); - p.grad = grad; - p.act = act; - p.alpha = alpha; - p.gain = gain; - p.clamp = clamp; - p.sizeX = (int)x.numel(); - p.sizeB = (int)b.numel(); - p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; - - // Choose CUDA kernel. - void* kernel; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - kernel = choose_bias_act_kernel(p); - }); - TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); - - // Launch CUDA kernel. 
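// [Editor's note] Each thread of the kernel below processes p.loopX elements, so the grid
// size is ceil(sizeX / (loopX * blockSize)), written with the usual integer identity
// (a - 1) / b + 1 == ceil(a / b) for a > 0.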
- p.loopX = 4; - int blockSize = 4 * 32; - int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("bias_act", &bias_act); -} - -//------------------------------------------------------------------------ diff --git a/spaces/yerfor/SyntaSpeech/tasks/tts/ps_flow.py b/spaces/yerfor/SyntaSpeech/tasks/tts/ps_flow.py deleted file mode 100644 index 37a2469ed08d382b58bcb6b8b1750986bb3dd345..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/tasks/tts/ps_flow.py +++ /dev/null @@ -1,134 +0,0 @@ -import torch -from modules.tts.portaspeech.portaspeech_flow import PortaSpeechFlow -from tasks.tts.fs import FastSpeechTask -from tasks.tts.ps import PortaSpeechTask -from utils.audio.pitch.utils import denorm_f0 -from utils.commons.hparams import hparams - - -class PortaSpeechFlowTask(PortaSpeechTask): - def __init__(self): - super().__init__() - self.training_post_glow = False - - def build_tts_model(self): - ph_dict_size = len(self.token_encoder) - word_dict_size = len(self.word_encoder) - self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams) - - def _training_step(self, sample, batch_idx, opt_idx): - self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ - and hparams['use_post_flow'] - if hparams['two_stage'] and \ - ((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)): - return None - loss_output, _ = self.run_model(sample) - total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) - loss_output['batch_size'] = sample['txt_tokens'].size()[0] - if 'postflow' in loss_output and loss_output['postflow'] is None: - return None - return total_loss, loss_output - - def run_model(self, sample, infer=False, *args, **kwargs): - if not infer: - training_post_glow = self.training_post_glow - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - output = self.model(sample['txt_tokens'], - sample['word_tokens'], - ph2word=sample['ph2word'], - mel2word=sample['mel2word'], - mel2ph=sample['mel2ph'], - word_len=sample['word_lengths'].max(), - tgt_mels=sample['mels'], - pitch=sample.get('pitch'), - spk_embed=spk_embed, - spk_id=spk_id, - infer=False, - forward_post_glow=training_post_glow, - two_stage=hparams['two_stage'], - global_step=self.global_step) - losses = {} - self.add_mel_loss(output['mel_out'], sample['mels'], losses) - if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']: - losses['postflow'] = output['postflow'] - losses['l1'] = losses['l1'].detach() - losses['ssim'] = losses['ssim'].detach() - if not training_post_glow or not hparams['two_stage'] or not self.training: - losses['kl'] = output['kl'] - if self.global_step < hparams['kl_start_steps']: - losses['kl'] = losses['kl'].detach() - else: - losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min']) - losses['kl'] = losses['kl'] * hparams['lambda_kl'] - if hparams['dur_level'] == 'word': - self.add_dur_loss( - output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) - self.get_attn_stats(output['attn'], sample, losses) - else: - super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) - return losses, output - else: - 
use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \ - and hparams['use_post_flow'] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - output = self.model( - sample['txt_tokens'], - sample['word_tokens'], - ph2word=sample['ph2word'], - word_len=sample['word_lengths'].max(), - pitch=sample.get('pitch'), - mel2ph=sample['mel2ph'] if use_gt_dur else None, - mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None, - infer=True, - forward_post_glow=forward_post_glow, - spk_embed=spk_embed, - spk_id=spk_id, - two_stage=hparams['two_stage'] - ) - return output - - def validation_step(self, sample, batch_idx): - self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ - and hparams['use_post_flow'] - return super().validation_step(sample, batch_idx) - - def save_valid_result(self, sample, batch_idx, model_out): - super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out) - sr = hparams['audio_sample_rate'] - f0_gt = None - if sample.get('f0') is not None: - f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) - if self.global_step > 0: - # save FVAE result - if hparams['use_post_flow']: - wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0], - f'mel_fvae_{batch_idx}', f0s=f0_gt) - - def build_optimizer(self, model): - if hparams['two_stage'] and hparams['use_post_flow']: - self.optimizer = torch.optim.AdamW( - [p for name, p in self.model.named_parameters() if 'post_flow' not in name], - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - self.post_flow_optimizer = torch.optim.AdamW( - self.model.post_flow.parameters(), - lr=hparams['post_flow_lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return [self.optimizer, self.post_flow_optimizer] - else: - self.optimizer = torch.optim.AdamW( - self.model.parameters(), - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return [self.optimizer] - - def build_scheduler(self, optimizer): - return FastSpeechTask.build_scheduler(self, optimizer[0]) \ No newline at end of file diff --git a/spaces/ygangang/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/extract_ckpt.py b/spaces/ygangang/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/extract_ckpt.py deleted file mode 100644 index 4b8b631348f2d0cdea4e5a3594bb59f3e8f34a0f..0000000000000000000000000000000000000000 --- a/spaces/ygangang/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/extract_ckpt.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch -import sys -sys.path.insert(0,'./facelib/detection/yolov5face') -model = torch.load('facelib/detection/yolov5face/yolov5n-face.pt', map_location='cpu')['model'] -torch.save(model.state_dict(),'weights/facelib/yolov5n-face.pth') \ No newline at end of file diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_detect/utils/__init__.py b/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_detect/utils/__init__.py deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/modeling_dinov2.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/modeling_dinov2.py deleted file mode 100644 index 8816dbe49c7bedc7162ed93f54ab791cef01f0b7..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dinov2/modeling_dinov2.py +++ /dev/null @@ -1,865 +0,0 @@ -# coding=utf-8 -# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch DINOv2 model.""" - - -import collections.abc -import math -from typing import Dict, List, Optional, Set, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -from ...activations import ACT2FN -from ...modeling_outputs import ( - BackboneOutput, - BaseModelOutput, - BaseModelOutputWithPooling, - ImageClassifierOutput, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from ...utils.backbone_utils import BackboneMixin -from .configuration_dinov2 import Dinov2Config - - -logger = logging.get_logger(__name__) - -# General docstring -_CONFIG_FOR_DOC = "Dinov2Config" - -# Base docstring -_CHECKPOINT_FOR_DOC = "facebook/dinov2-base" -_EXPECTED_OUTPUT_SHAPE = [1, 257, 768] - -# Image classification docstring -_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-base" - - -DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "facebook/dinov2-base", - # See all DINOv2 models at https://huggingface.co/models?filter=dinov2 -] - - -class Dinov2Embeddings(nn.Module): - """ - Construct the CLS token, mask token, position and patch embeddings. - """ - - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - - self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) - self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size)) - self.patch_embeddings = Dinov2PatchEmbeddings(config) - num_patches = self.patch_embeddings.num_patches - self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.config = config - - def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: - """ - This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher - resolution images. 
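As an aside, the interpolation this method performs reduces to a few lines. The following is a minimal editor's sketch of the same idea, not part of the deleted file; the function name and the 14x14 base grid are illustrative assumptions.

```python
import math
import torch
import torch.nn.functional as F

def resize_patch_pos_embed(pos_embed: torch.Tensor, new_h: int, new_w: int) -> torch.Tensor:
    """pos_embed: (1, N, dim) patch position embeddings laid out on a square sqrt(N) grid."""
    n, dim = pos_embed.shape[1], pos_embed.shape[2]
    side = int(math.sqrt(n))
    grid = pos_embed.reshape(1, side, side, dim).permute(0, 3, 1, 2)  # (1, dim, side, side)
    grid = F.interpolate(grid, size=(new_h, new_w), mode="bicubic", align_corners=False)
    return grid.permute(0, 2, 3, 1).reshape(1, new_h * new_w, dim)

# e.g. a 14x14 grid (196 patches) stretched to 16x16 (256 patches)
resized = resize_patch_pos_embed(torch.randn(1, 196, 768), 16, 16)
assert resized.shape == (1, 256, 768)
```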
- - Source: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 - """ - - num_patches = embeddings.shape[1] - 1 - num_positions = self.position_embeddings.shape[1] - 1 - if num_patches == num_positions and height == width: - return self.position_embeddings - class_pos_embed = self.position_embeddings[:, 0] - patch_pos_embed = self.position_embeddings[:, 1:] - dim = embeddings.shape[-1] - height = height // self.config.patch_size - width = width // self.config.patch_size - # we add a small number to avoid floating point error in the interpolation - # see discussion at https://github.com/facebookresearch/dino/issues/8 - height, width = height + 0.1, width + 0.1 - patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) - patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) - patch_pos_embed = nn.functional.interpolate( - patch_pos_embed, - scale_factor=(height / math.sqrt(num_positions), width / math.sqrt(num_positions)), - mode="bicubic", - align_corners=False, - ) - if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]: - raise ValueError("Width or height does not match with the interpolated position embeddings") - patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) - return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) - - def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor: - batch_size, _, height, width = pixel_values.shape - embeddings = self.patch_embeddings(pixel_values) - - if bool_masked_pos is not None: - embeddings = torch.where( - bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings - ) - - # add the [CLS] token to the embedded patch tokens - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - embeddings = torch.cat((cls_tokens, embeddings), dim=1) - - # add positional encoding to each token - embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) - - embeddings = self.dropout(embeddings) - - return embeddings - - -class Dinov2PatchEmbeddings(nn.Module): - """ - This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial - `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a - Transformer. - """ - - def __init__(self, config): - super().__init__() - image_size, patch_size = config.image_size, config.patch_size - num_channels, hidden_size = config.num_channels, config.hidden_size - - image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) - patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) - num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) - self.image_size = image_size - self.patch_size = patch_size - self.num_channels = num_channels - self.num_patches = num_patches - - self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) - - def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: - num_channels = pixel_values.shape[1] - if num_channels != self.num_channels: - raise ValueError( - "Make sure that the channel dimension of the pixel values match with the one set in the configuration." - f" Expected {self.num_channels} but got {num_channels}." 
- ) - embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) - return embeddings - - -# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2 -class Dinov2SelfAttention(nn.Module): - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " - f"heads {config.num_attention_heads}." - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) - self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) - self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False - ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: - mixed_query_layer = self.query(hidden_states) - - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - query_layer = self.transpose_for_scores(mixed_query_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - - # Normalize the attention scores to probabilities. - attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - return outputs - - -# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2 -class Dinov2SelfOutput(nn.Module): - """ - The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the - layernorm applied before each block. 
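For reference, the core of `Dinov2SelfAttention.forward` above is plain scaled dot-product attention. A minimal editor's sketch with assumed toy shapes (batch 2, 12 heads of width 64, 5 tokens), added purely for illustration:

```python
import math
import torch

B, H, T, d = 2, 12, 5, 64                        # batch, heads, tokens, head width
q = torch.randn(B, H, T, d)
k = torch.randn(B, H, T, d)
v = torch.randn(B, H, T, d)

scores = q @ k.transpose(-1, -2) / math.sqrt(d)  # (B, H, T, T) raw attention scores
probs = scores.softmax(dim=-1)                   # each row sums to 1
context = probs @ v                              # (B, H, T, d)
context = context.permute(0, 2, 1, 3).reshape(B, T, H * d)  # merge heads
assert context.shape == (2, 5, 768)
```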
- """ - - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - - return hidden_states - - -# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2 -class Dinov2Attention(nn.Module): - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - self.attention = Dinov2SelfAttention(config) - self.output = Dinov2SelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads: Set[int]) -> None: - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.attention.query = prune_linear_layer(self.attention.query, index) - self.attention.key = prune_linear_layer(self.attention.key, index) - self.attention.value = prune_linear_layer(self.attention.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) - self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: torch.Tensor, - head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: - self_outputs = self.attention(hidden_states, head_mask, output_attentions) - - attention_output = self.output(self_outputs[0], hidden_states) - - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class Dinov2LayerScale(nn.Module): - def __init__(self, config) -> None: - super().__init__() - self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size)) - - def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: - return hidden_state * self.lambda1 - - -# Copied from transformers.models.beit.modeling_beit.drop_path -def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: - """ - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, - however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the - layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the - argument. 
- """ - if drop_prob == 0.0 or not training: - return input - keep_prob = 1 - drop_prob - shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) - random_tensor.floor_() # binarize - output = input.div(keep_prob) * random_tensor - return output - - -# Copied from transformers.models.beit.modeling_beit.BeitDropPath -class Dinov2DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" - - def __init__(self, drop_prob: Optional[float] = None) -> None: - super().__init__() - self.drop_prob = drop_prob - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - return drop_path(hidden_states, self.drop_prob, self.training) - - def extra_repr(self) -> str: - return "p={}".format(self.drop_prob) - - -class Dinov2MLP(nn.Module): - def __init__(self, config) -> None: - super().__init__() - in_features = out_features = config.hidden_size - hidden_features = int(config.hidden_size * config.mlp_ratio) - self.fc1 = nn.Linear(in_features, hidden_features, bias=True) - if isinstance(config.hidden_act, str): - self.activation = ACT2FN[config.hidden_act] - else: - self.activation = config.hidden_act - self.fc2 = nn.Linear(hidden_features, out_features, bias=True) - - def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: - hidden_state = self.fc1(hidden_state) - hidden_state = self.activation(hidden_state) - hidden_state = self.fc2(hidden_state) - return hidden_state - - -class Dinov2SwiGLUFFN(nn.Module): - def __init__(self, config) -> None: - super().__init__() - in_features = out_features = config.hidden_size - hidden_features = int(config.hidden_size * config.mlp_ratio) - hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 - - self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True) - self.weights_out = nn.Linear(hidden_features, out_features, bias=True) - - def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: - hidden_state = self.weights_in(hidden_state) - x1, x2 = hidden_state.chunk(2, dim=-1) - hidden = nn.functional.silu(x1) * x2 - return self.weights_out(hidden) - - -class Dinov2Layer(nn.Module): - """This corresponds to the Block class in the original implementation.""" - - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - - self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = Dinov2Attention(config) - self.layer_scale1 = Dinov2LayerScale(config) - self.drop_path1 = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() - - self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - if config.use_swiglu_ffn: - self.mlp = Dinov2SwiGLUFFN(config) - else: - self.mlp = Dinov2MLP(config) - self.layer_scale2 = Dinov2LayerScale(config) - self.drop_path2 = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() - - def forward( - self, - hidden_states: torch.Tensor, - head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: - self_attention_outputs = self.attention( - self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention - head_mask, - output_attentions=output_attentions, - ) - attention_output = self_attention_outputs[0] - - attention_output = self.layer_scale1(attention_output) - outputs = 
self_attention_outputs[1:] # add self attentions if we output attention weights - - # first residual connection - hidden_states = attention_output + hidden_states - - # in Dinov2, layernorm is also applied after self-attention - layer_output = self.norm2(hidden_states) - layer_output = self.mlp(layer_output) - layer_output = self.layer_scale2(layer_output) - - # second residual connection - layer_output = layer_output + hidden_states - - outputs = (layer_output,) + outputs - - return outputs - - -# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2 -class Dinov2Encoder(nn.Module): - def __init__(self, config: Dinov2Config) -> None: - super().__init__() - self.config = config - self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - head_mask: Optional[torch.Tensor] = None, - output_attentions: bool = False, - output_hidden_states: bool = False, - return_dict: bool = True, - ) -> Union[tuple, BaseModelOutput]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - layer_head_mask, - ) - else: - layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - -class Dinov2PreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
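As context for the `gradient_checkpointing` branch in `Dinov2Encoder.forward` above: activation checkpointing discards each layer's intermediate activations and recomputes them during backward, trading compute for memory. A self-contained editor's sketch with assumed toy modules (the `use_reentrant` flag exists in recent PyTorch releases):

```python
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

layers = nn.ModuleList(nn.Sequential(nn.Linear(16, 16), nn.GELU()) for _ in range(4))
x = torch.randn(2, 16, requires_grad=True)
for layer in layers:
    # activations inside `layer` are dropped now and rebuilt on backward
    x = checkpoint(layer, x, use_reentrant=False)
x.sum().backward()
```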
- """ - - config_class = Dinov2Config - base_model_prefix = "dinov2" - main_input_name = "pixel_values" - supports_gradient_checkpointing = True - - def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: - """Initialize the weights""" - if isinstance(module, (nn.Linear, nn.Conv2d)): - # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid - # `trunc_normal_cpu` not implemented in `half` issues - module.weight.data = nn.init.trunc_normal_( - module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range - ).to(module.weight.dtype) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - elif isinstance(module, Dinov2Embeddings): - module.position_embeddings.data = nn.init.trunc_normal_( - module.position_embeddings.data.to(torch.float32), - mean=0.0, - std=self.config.initializer_range, - ).to(module.position_embeddings.dtype) - - module.cls_token.data = nn.init.trunc_normal_( - module.cls_token.data.to(torch.float32), - mean=0.0, - std=self.config.initializer_range, - ).to(module.cls_token.dtype) - - def _set_gradient_checkpointing(self, module: Dinov2Encoder, value: bool = False) -> None: - if isinstance(module, Dinov2Encoder): - module.gradient_checkpointing = value - - -DINOV2_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it - as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and - behavior. - - Parameters: - config ([`Dinov2Config`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -DINOV2_BASE_INPUTS_DOCSTRING = r""" - Args: - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See - [`BitImageProcessor.preprocess`] for details. - - bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): - Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for - pre-training. - - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -DINOV2_INPUTS_DOCSTRING = r""" - Args: - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See - [`BitImageProcessor.preprocess`] for details. 
- - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.", - DINOV2_START_DOCSTRING, -) -class Dinov2Model(Dinov2PreTrainedModel): - def __init__(self, config: Dinov2Config): - super().__init__(config) - self.config = config - - self.embeddings = Dinov2Embeddings(config) - self.encoder = Dinov2Encoder(config) - - self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> Dinov2PatchEmbeddings: - return self.embeddings.patch_embeddings - - def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPooling, - config_class=_CONFIG_FOR_DOC, - modality="vision", - expected_output=_EXPECTED_OUTPUT_SHAPE, - ) - def forward( - self, - pixel_values: Optional[torch.Tensor] = None, - bool_masked_pos: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPooling]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if pixel_values is None: - raise ValueError("You have to specify pixel_values") - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) - - encoder_outputs = self.encoder( - embedding_output, - head_mask=head_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - sequence_output = self.layernorm(sequence_output) - pooled_output = sequence_output[:, 0, :] - - if 
not return_dict: - head_outputs = (sequence_output, pooled_output) - return head_outputs + encoder_outputs[1:] - - return BaseModelOutputWithPooling( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - """ - Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state - of the [CLS] token) e.g. for ImageNet. - """, - DINOV2_START_DOCSTRING, -) -class Dinov2ForImageClassification(Dinov2PreTrainedModel): - def __init__(self, config: Dinov2Config) -> None: - super().__init__(config) - - self.num_labels = config.num_labels - self.dinov2 = Dinov2Model(config) - - # Classifier head - self.classifier = ( - nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity() - ) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) - @add_code_sample_docstrings( - checkpoint=_IMAGE_CLASS_CHECKPOINT, - output_type=ImageClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - pixel_values: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[tuple, ImageClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the image classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
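The classifier above takes `config.hidden_size * 2` input features because, as the forward pass below shows, it is fed the [CLS] embedding concatenated with the mean of the patch-token embeddings. An editor's sketch with assumed DINOv2-base shapes:

```python
import torch

hidden = torch.randn(2, 257, 768)        # (batch, 1 + num_patches, hidden_size)
cls_token = hidden[:, 0]                 # (2, 768)
patch_mean = hidden[:, 1:].mean(dim=1)   # (2, 768)
linear_input = torch.cat([cls_token, patch_mean], dim=1)
assert linear_input.shape == (2, 1536)   # hidden_size * 2
```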
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.dinov2( - pixel_values, - head_mask=head_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] # batch_size, sequence_length, hidden_size - - cls_token = sequence_output[:, 0] - patch_tokens = sequence_output[:, 1:] - - linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1) - - logits = self.classifier(linear_input) - - loss = None - if labels is not None: - # move labels to correct device to enable model parallelism - labels = labels.to(logits.device) - if self.config.problem_type is None: - if self.num_labels == 1: - self.config.problem_type = "regression" - elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): - self.config.problem_type = "single_label_classification" - else: - self.config.problem_type = "multi_label_classification" - - if self.config.problem_type == "regression": - loss_fct = MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return ImageClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - Dinov2 backbone, to be used with frameworks like DETR and MaskFormer. - """, - DINOV2_START_DOCSTRING, -) -class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin): - def __init__(self, config): - super().__init__(config) - super()._init_backbone(config) - - self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] - self.embeddings = Dinov2Embeddings(config) - self.encoder = Dinov2Encoder(config) - - self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> Dinov2PatchEmbeddings: - return self.embeddings.patch_embeddings - - @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) - def forward( - self, - pixel_values: torch.Tensor, - output_hidden_states: Optional[bool] = None, - output_attentions: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> BackboneOutput: - """ - Returns: - - Examples: - - ```python - >>> from transformers import AutoImageProcessor, AutoBackbone - >>> import torch - >>> from PIL import Image - >>> import requests - - >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base") - >>> model = AutoBackbone.from_pretrained( - ... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"] - ... 
) - - >>> inputs = processor(image, return_tensors="pt") - - >>> outputs = model(**inputs) - >>> feature_maps = outputs.feature_maps - >>> list(feature_maps[-1].shape) - [1, 768, 16, 16] - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - - embedding_output = self.embeddings(pixel_values) - - outputs = self.encoder( - embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict - ) - - hidden_states = outputs.hidden_states if return_dict else outputs[1] - - feature_maps = () - for stage, hidden_state in zip(self.stage_names, hidden_states): - if stage in self.out_features: - if self.config.apply_layernorm: - hidden_state = self.layernorm(hidden_state) - if self.config.reshape_hidden_states: - batch_size, _, height, width = pixel_values.shape - patch_size = self.config.patch_size - hidden_state = hidden_state[:, 1:, :].reshape( - batch_size, width // patch_size, height // patch_size, -1 - ) - hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() - feature_maps += (hidden_state,) - - if not return_dict: - if output_hidden_states: - output = (feature_maps,) + outputs[1:] - else: - output = (feature_maps,) + outputs[2:] - return output - - return BackboneOutput( - feature_maps=feature_maps, - hidden_states=outputs.hidden_states if output_hidden_states else None, - attentions=outputs.attentions if output_attentions else None, - ) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/models.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/models.py deleted file mode 100644 index c2c889ec2fbd215702298ba2b7c411c6f5630d80..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/models.py +++ /dev/null @@ -1,439 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - h = load_config(model_path) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path, map_location=device) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - -def load_config(model_path): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - json_config = json.loads(data) - h = AttrDict(json_config) - return h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - 
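# [Editor's note] get_padding(k, d) here evaluates to (k * d - d) // 2, i.e. (k - 1) * d // 2,
# the "same" padding for a stride-1 dilated conv (kernel 3 with dilation 5 pads by 5),
# so every branch of this residual block preserves the sequence length.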
self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-waveform (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_threshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SineGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - @torch.no_grad() - def forward(self, f0, upp): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - f0 = f0.unsqueeze(-1) - fn = torch.multiply(f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1))) - rad_values = (fn / self.sampling_rate) % 1 ### the % 1 means the per-harmonic products cannot be optimized away afterwards - rand_ini = torch.rand(fn.shape[0], fn.shape[2], device=fn.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - is_half = rad_values.dtype is not torch.float32 - tmp_over_one = torch.cumsum(rad_values.double(), 1) # % 1 ##### applying % 1 here would make the following cumsum impossible to optimize - if is_half: - tmp_over_one = tmp_over_one.half() - else: - tmp_over_one = tmp_over_one.float() - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), scale_factor=upp, - mode='linear', align_corners=True -
).transpose(2, 1) - rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1) - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - rad_values = rad_values.double() - cumsum_shift = cumsum_shift.double() - sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) - if is_half: - sine_waves = sine_waves.half() - else: - sine_waves = sine_waves.float() - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - self.num_kernels = len(h.resblock_kernel_sizes) - self.num_upsamples = len(h.upsample_rates) - self.m_source = SourceModuleHnNSF( - sampling_rate=h.sampling_rate, - harmonic_num=8 - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) - resblock = ResBlock1 if h.resblock == '1' else ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): - c_cur = h.upsample_initial_channel // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h.upsample_rates): # - stride_f0 = int(np.prod(h.upsample_rates[i + 1:])) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - ch = h.upsample_initial_channel - for i in range(len(self.ups)): - ch //= 2 - for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, 
h.resblock_dilation_sizes)): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.upp = int(np.prod(h.upsample_rates)) - - def forward(self, x, f0): - har_source = self.m_source(f0, self.upp).transpose(1, 2) - x = self.conv_pre(x) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), 
- ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py deleted file mode 100644 index 48c136f1623261b079591065fec7c7fc38165076..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import json -import logging -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES -from detectron2.utils.file_io import PathManager - -""" -This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog. 
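Stepping back to the HiFi-GAN losses defined just above: `discriminator_loss` and `generator_loss` implement the least-squares GAN objective, pushing D(real) toward 1 and D(fake) toward 0 while the generator pushes D(fake) toward 1. A minimal editor's sketch with random stand-in discriminator outputs:

```python
import torch

d_real = torch.rand(8)  # stand-ins for per-sample discriminator outputs
d_fake = torch.rand(8)
d_loss = torch.mean((1 - d_real) ** 2) + torch.mean(d_fake ** 2)  # D: real -> 1, fake -> 0
g_loss = torch.mean((1 - d_fake) ** 2)                            # G: fake -> 1
```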
-""" - - -logger = logging.getLogger(__name__) - - -def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info): - files = [] - # scan through the directory - cities = PathManager.ls(image_dir) - logger.info(f"{len(cities)} cities found in '{image_dir}'.") - image_dict = {} - for city in cities: - city_img_dir = os.path.join(image_dir, city) - for basename in PathManager.ls(city_img_dir): - image_file = os.path.join(city_img_dir, basename) - - suffix = "_leftImg8bit.png" - assert basename.endswith(suffix), basename - basename = os.path.basename(basename)[: -len(suffix)] - - image_dict[basename] = image_file - - for ann in json_info["annotations"]: - image_file = image_dict.get(ann["image_id"], None) - assert image_file is not None, "No image {} found for annotation {}".format( - ann["image_id"], ann["file_name"] - ) - label_file = os.path.join(gt_dir, ann["file_name"]) - segments_info = ann["segments_info"] - - files.append((image_file, label_file, segments_info)) - - assert len(files), "No images found in {}".format(image_dir) - assert PathManager.isfile(files[0][0]), files[0][0] - assert PathManager.isfile(files[0][1]), files[0][1] - return files - - -def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta): - """ - Args: - image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". - gt_dir (str): path to the raw annotations. e.g., - "~/cityscapes/gtFine/cityscapes_panoptic_train". - gt_json (str): path to the json file. e.g., - "~/cityscapes/gtFine/cityscapes_panoptic_train.json". - meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id" - and "stuff_dataset_id_to_contiguous_id" to map category ids to - contiguous ids for training. - - Returns: - list[dict]: a list of dicts in Detectron2 standard format. (See - `Using Custom Datasets `_ ) - """ - - def _convert_category_id(segment_info, meta): - if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: - segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - else: - segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - return segment_info - - assert os.path.exists( - gt_json - ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa - with open(gt_json) as f: - json_info = json.load(f) - files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info) - ret = [] - for image_file, label_file, segments_info in files: - sem_label_file = ( - image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png" - ) - segments_info = [_convert_category_id(x, meta) for x in segments_info] - ret.append( - { - "file_name": image_file, - "image_id": "_".join( - os.path.splitext(os.path.basename(image_file))[0].split("_")[:3] - ), - "sem_seg_file_name": sem_label_file, - "pan_seg_file_name": label_file, - "segments_info": segments_info, - } - ) - assert len(ret), f"No images found in {image_dir}!" 
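-    # A sketch of one returned record (paths abbreviated, values illustrative):
-    # {
-    #     "file_name": ".../train/aachen/aachen_000000_000019_leftImg8bit.png",
-    #     "image_id": "aachen_000000_000019",
-    #     "sem_seg_file_name": ".../aachen_000000_000019_gtFine_labelTrainIds.png",
-    #     "pan_seg_file_name": ".../aachen_000000_000019_gtFine_panoptic.png",
-    #     "segments_info": [...],  # per-segment dicts from the panoptic json
-    # }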
-    assert PathManager.isfile(
-        ret[0]["sem_seg_file_name"]
-    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
-    assert PathManager.isfile(
-        ret[0]["pan_seg_file_name"]
-    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
-    return ret
-
-
-_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
-    "cityscapes_fine_panoptic_train": (
-        "cityscapes/leftImg8bit/train",
-        "cityscapes/gtFine/cityscapes_panoptic_train",
-        "cityscapes/gtFine/cityscapes_panoptic_train.json",
-    ),
-    "cityscapes_fine_panoptic_val": (
-        "cityscapes/leftImg8bit/val",
-        "cityscapes/gtFine/cityscapes_panoptic_val",
-        "cityscapes/gtFine/cityscapes_panoptic_val.json",
-    ),
-    # "cityscapes_fine_panoptic_test": not supported yet
-}
-
-
-def register_all_cityscapes_panoptic(root):
-    meta = {}
-    # The following metadata maps contiguous ids from [0, #thing categories +
-    # #stuff categories) to their names and colors. We keep two copies of the
-    # same name and color under "thing_*" and "stuff_*" because the current
-    # visualization function in D2 handles thing and stuff classes differently
-    # due to some heuristic used in Panoptic FPN. We keep the same naming to
-    # enable reusing existing visualization functions.
-    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
-    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
-    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
-    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
-
-    meta["thing_classes"] = thing_classes
-    meta["thing_colors"] = thing_colors
-    meta["stuff_classes"] = stuff_classes
-    meta["stuff_colors"] = stuff_colors
-
-    # There are three types of ids in cityscapes panoptic segmentation:
-    # (1) category id: like semantic segmentation, it is the class id for each
-    # pixel. Since some classes are not used in evaluation, the category id is
-    # not always contiguous and thus we have two sets of category ids:
-    #   - original category id: category id in the original dataset, mainly
-    #     used for evaluation.
-    #   - contiguous category id: [0, #classes), in order to train the classifier
-    # (2) instance id: this id is used to differentiate different instances from
-    # the same category. For "stuff" classes, the instance id is always 0; for
-    # "thing" classes, the instance id starts from 1 and 0 is reserved for
-    # ignored instances (e.g. crowd annotation).
-    # (3) panoptic id: this is the compact id that encodes both category and
-    # instance id by: category_id * 1000 + instance_id.
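-    # For example (illustrative numbers): a "car" segment with original category
-    # id 26 and instance id 3 is stored as 26 * 1000 + 3 = 26003, and can be
-    # decoded with category_id, instance_id = divmod(panoptic_id, 1000).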
- thing_dataset_id_to_contiguous_id = {} - stuff_dataset_id_to_contiguous_id = {} - - for k in CITYSCAPES_CATEGORIES: - if k["isthing"] == 1: - thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] - else: - stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] - - meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id - meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id - - for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items(): - image_dir = os.path.join(root, image_dir) - gt_dir = os.path.join(root, gt_dir) - gt_json = os.path.join(root, gt_json) - - DatasetCatalog.register( - key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta) - ) - MetadataCatalog.get(key).set( - panoptic_root=gt_dir, - image_root=image_dir, - panoptic_json=gt_json, - gt_dir=gt_dir.replace("cityscapes_panoptic_", ""), - evaluator_type="cityscapes_panoptic_seg", - ignore_label=255, - label_divisor=1000, - **meta, - ) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py deleted file mode 100644 index b8dae44317b556610d7fed39017e082d7e855956..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import json -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.utils.file_io import PathManager - -from .coco import load_coco_json, load_sem_seg - -__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"] - - -def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta): - """ - Args: - image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". - gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". - json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". - - Returns: - list[dict]: a list of dicts in Detectron2 standard format. (See - `Using Custom Datasets `_ ) - """ - - def _convert_category_id(segment_info, meta): - if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: - segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = True - else: - segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = False - return segment_info - - with PathManager.open(json_file) as f: - json_info = json.load(f) - - ret = [] - for ann in json_info["annotations"]: - image_id = int(ann["image_id"]) - # TODO: currently we assume image and label has the same filename but - # different extension, and images have extension ".jpg" for COCO. Need - # to make image extension a user-provided argument if we extend this - # function to support other COCO-like datasets. 
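-        # For example (illustrative COCO names): the panoptic annotation
-        # "000000000139.png" is paired with the image "000000000139.jpg".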
- image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") - label_file = os.path.join(gt_dir, ann["file_name"]) - segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] - ret.append( - { - "file_name": image_file, - "image_id": image_id, - "pan_seg_file_name": label_file, - "segments_info": segments_info, - } - ) - assert len(ret), f"No images found in {image_dir}!" - assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] - assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] - return ret - - -def register_coco_panoptic( - name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None -): - """ - Register a "standard" version of COCO panoptic segmentation dataset named `name`. - The dictionaries in this registered dataset follows detectron2's standard format. - Hence it's called "standard". - - Args: - name (str): the name that identifies a dataset, - e.g. "coco_2017_train_panoptic" - metadata (dict): extra metadata associated with this dataset. - image_root (str): directory which contains all the images - panoptic_root (str): directory which contains panoptic annotation images in COCO format - panoptic_json (str): path to the json panoptic annotation file in COCO format - sem_seg_root (none): not used, to be consistent with - `register_coco_panoptic_separated`. - instances_json (str): path to the json instance annotation file - """ - panoptic_name = name - DatasetCatalog.register( - panoptic_name, - lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata), - ) - MetadataCatalog.get(panoptic_name).set( - panoptic_root=panoptic_root, - image_root=image_root, - panoptic_json=panoptic_json, - json_file=instances_json, - evaluator_type="coco_panoptic_seg", - ignore_label=255, - label_divisor=1000, - **metadata, - ) - - -def register_coco_panoptic_separated( - name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json -): - """ - Register a "separated" version of COCO panoptic segmentation dataset named `name`. - The annotations in this registered dataset will contain both instance annotations and - semantic annotations, each with its own contiguous ids. Hence it's called "separated". - - It follows the setting used by the PanopticFPN paper: - - 1. The instance annotations directly come from polygons in the COCO - instances annotation task, rather than from the masks in the COCO panoptic annotations. - - The two format have small differences: - Polygons in the instance annotations may have overlaps. - The mask annotations are produced by labeling the overlapped polygons - with depth ordering. - - 2. The semantic annotations are converted from panoptic annotations, where - all "things" are assigned a semantic id of 0. - All semantic categories will therefore have ids in contiguous - range [1, #stuff_categories]. - - This function will also register a pure semantic segmentation dataset - named ``name + '_stuffonly'``. - - Args: - name (str): the name that identifies a dataset, - e.g. "coco_2017_train_panoptic" - metadata (dict): extra metadata associated with this dataset. - image_root (str): directory which contains all the images - panoptic_root (str): directory which contains panoptic annotation images - panoptic_json (str): path to the json panoptic annotation file - sem_seg_root (str): directory which contains all the ground truth segmentation annotations. 
- instances_json (str): path to the json instance annotation file - """ - panoptic_name = name + "_separated" - DatasetCatalog.register( - panoptic_name, - lambda: merge_to_panoptic( - load_coco_json(instances_json, image_root, panoptic_name), - load_sem_seg(sem_seg_root, image_root), - ), - ) - MetadataCatalog.get(panoptic_name).set( - panoptic_root=panoptic_root, - image_root=image_root, - panoptic_json=panoptic_json, - sem_seg_root=sem_seg_root, - json_file=instances_json, # TODO rename - evaluator_type="coco_panoptic_seg", - ignore_label=255, - **metadata, - ) - - semantic_name = name + "_stuffonly" - DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) - MetadataCatalog.get(semantic_name).set( - sem_seg_root=sem_seg_root, - image_root=image_root, - evaluator_type="sem_seg", - ignore_label=255, - **metadata, - ) - - -def merge_to_panoptic(detection_dicts, sem_seg_dicts): - """ - Create dataset dicts for panoptic segmentation, by - merging two dicts using "file_name" field to match their entries. - - Args: - detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation. - sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation. - - Returns: - list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in - both detection_dicts and sem_seg_dicts that correspond to the same image. - The function assumes that the same key in different dicts has the same value. - """ - results = [] - sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts} - assert len(sem_seg_file_to_entry) > 0 - - for det_dict in detection_dicts: - dic = copy.copy(det_dict) - dic.update(sem_seg_file_to_entry[dic["file_name"]]) - results.append(dic) - return results - - -if __name__ == "__main__": - """ - Test the COCO panoptic dataset loader. 
- - Usage: - python -m detectron2.data.datasets.coco_panoptic \ - path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10 - - "dataset_name" can be "coco_2017_train_panoptic", or other - pre-registered ones - """ - from detectron2.utils.logger import setup_logger - from detectron2.utils.visualizer import Visualizer - import detectron2.data.datasets # noqa # add pre-defined metadata - import sys - from PIL import Image - import numpy as np - - logger = setup_logger(name=__name__) - assert sys.argv[4] in DatasetCatalog.list() - meta = MetadataCatalog.get(sys.argv[4]) - - dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict()) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "coco-data-vis" - os.makedirs(dirname, exist_ok=True) - num_imgs_to_vis = int(sys.argv[5]) - for i, d in enumerate(dicts): - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) - if i + 1 >= num_imgs_to_vis: - break diff --git a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/latex/attention/model_architecture.tex b/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/latex/attention/model_architecture.tex deleted file mode 100644 index c82be6242cc9d26203360e90d3ac9184ef6ad842..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/latex/attention/model_architecture.tex +++ /dev/null @@ -1,155 +0,0 @@ - -\begin{figure} - \centering - \includegraphics[scale=0.6]{Figures/ModalNet-21} - \caption{The Transformer - model architecture.} - \label{fig:model-arch} -\end{figure} - -% Although the primary workhorse of our model is attention, -%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail. - -Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next. - -The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively. - -\subsection{Encoder and Decoder Stacks} - -\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. 
The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$. - -\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$. - -% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail. - -\subsection{Attention} \label{sec:attention} -An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key. - -\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod} - -% \begin{figure} -% \centering -% \includegraphics[scale=0.6]{Figures/ModalNet-19} -% \caption{Scaled Dot-Product Attention.} -% \label{fig:multi-head-att} -% \end{figure} - -We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values. - -In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as: - -\begin{equation} - \mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V -\end{equation} - -The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. 
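-% Editor's note (not in the original source): shape bookkeeping for the
-% equation above: with $n$ queries and $m$ key-value pairs, $Q \in \mathbb{R}^{n \times d_k}$,
-% $K \in \mathbb{R}^{m \times d_k}$, and $V \in \mathbb{R}^{m \times d_v}$; thus
-% $QK^T \in \mathbb{R}^{n \times m}$ and the output lies in $\mathbb{R}^{n \times d_v}$.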
While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code. - -%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients. - -% Already described in the subsequent section -%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$. - -%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model. - -While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$. - - -%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$. - - -\subsubsection{Multi-Head Attention} \label{sec:multihead} - -\begin{figure} -\begin{minipage}[t]{0.5\textwidth} - \centering - Scaled Dot-Product Attention \\ - \vspace{0.5cm} - \includegraphics[scale=0.6]{Figures/ModalNet-19} -\end{minipage} -\begin{minipage}[t]{0.5\textwidth} - \centering - Multi-Head Attention \\ - \vspace{0.1cm} - \includegraphics[scale=0.6]{Figures/ModalNet-20} -\end{minipage} - - - % \centering - - \caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.} - \label{fig:multi-head-att} -\end{figure} - -Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively. -On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}. - -Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this. 
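-% Editor's note (not in the original source): with the values used later in this
-% section ($h = 8$, $\dmodel = 512$, $d_k = d_v = \dmodel/h = 64$), concatenating
-% the $h$ heads restores $h \cdot d_v = 8 \cdot 64 = 512 = \dmodel$, so the output
-% projection $W^O$ below is a square $512 \times 512$ matrix.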
- -\begin{align*} - \mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\ -% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\ - \text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\ -\end{align*} - -Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$. - - -%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation. - -In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$. -Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality. - -\subsubsection{Applications of Attention in our Model} - -The Transformer uses multi-head attention in three different ways: -\begin{itemize} - \item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}. - - \item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder. - - \item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}. - -\end{itemize} - -\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn} - -In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between. - -\begin{equation} - \mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2 -\end{equation} - -While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. 
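-% Editor's note (not in the original source): with the dimensions reported below
-% ($\dmodel = 512$, $d_{ff} = 2048$), $W_1 \in \mathbb{R}^{512 \times 2048}$ and
-% $W_2 \in \mathbb{R}^{2048 \times 512}$, about $2 \times 512 \cdot 2048 \approx 2.1$M
-% weights per layer, plus biases.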
The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$. - - - -%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention. - -%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention. - - -%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as -%\begin{equation*} \label{eq:attention} -% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq). -%\end{equation*} -%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$. - -%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$. -%\marginpar{} - -\subsection{Embeddings and Softmax} -Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$. - - -\subsection{Positional Encoding} -Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. 
To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}. - -In this work, we use sine and cosine functions of different frequencies: - -\begin{align*} - PE_{(pos,2i)} = sin(pos / 10000^{2i/\dmodel}) \\ - PE_{(pos,2i+1)} = cos(pos / 10000^{2i/\dmodel}) -\end{align*} - -where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$. - -We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training. diff --git a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/utils/lru_cache.py b/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/utils/lru_cache.py deleted file mode 100644 index 28d6abe77e7f42865e2a5d62295c6ea4545a80b2..0000000000000000000000000000000000000000 --- a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/utils/lru_cache.py +++ /dev/null @@ -1,31 +0,0 @@ -from collections import OrderedDict - - -class LRUCache: - def __init__(self, capacity=5): - self.cache = OrderedDict() - self.capacity = capacity - - def get(self, key): - if key in self.cache: - # Move the accessed item to the end of the OrderedDict - self.cache.move_to_end(key) - return self.cache[key] - return None - - def set(self, key, value): - if key in self.cache: - # If the key already exists, update its value - self.cache[key] = value - else: - # If the cache has reached its capacity, remove the least recently used item - if len(self.cache) >= self.capacity: - self.cache.popitem(last=False) - self.cache[key] = value - - def clear(self): - self.cache.clear() - - def prepare_to_set(self): - if len(self.cache) >= self.capacity: - self.cache.popitem(last=False) diff --git a/spaces/zhenwusw/JoJoGAN/e4e/editings/sefa.py b/spaces/zhenwusw/JoJoGAN/e4e/editings/sefa.py deleted file mode 100644 index db7083ce463b765a7cf452807883a3b85fb63fa5..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/editings/sefa.py +++ /dev/null @@ -1,46 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm - - -def edit(generator, latents, indices, semantics=1, start_distance=-15.0, end_distance=15.0, num_samples=1, step=11): - - layers, boundaries, values = factorize_weight(generator, indices) - codes = latents.detach().cpu().numpy() # (1,18,512) - - # Generate visualization pages. 
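-    # Reading of the loop below (added note): each latent code is shifted along
-    # each chosen eigen-direction by `step` distances in
-    # [start_distance, end_distance], so num_sem * num_sam * step edited latents
-    # are returned.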
- distances = np.linspace(start_distance, end_distance, step) - num_sam = num_samples - num_sem = semantics - - edited_latents = [] - for sem_id in tqdm(range(num_sem), desc='Semantic ', leave=False): - boundary = boundaries[sem_id:sem_id + 1] - for sam_id in tqdm(range(num_sam), desc='Sample ', leave=False): - code = codes[sam_id:sam_id + 1] - for col_id, d in enumerate(distances, start=1): - temp_code = code.copy() - temp_code[:, layers, :] += boundary * d - edited_latents.append(torch.from_numpy(temp_code).float().cuda()) - return torch.cat(edited_latents) - - -def factorize_weight(g_ema, layers='all'): - - weights = [] - if layers == 'all' or 0 in layers: - weight = g_ema.conv1.conv.modulation.weight.T - weights.append(weight.cpu().detach().numpy()) - - if layers == 'all': - layers = list(range(g_ema.num_layers - 1)) - else: - layers = [l - 1 for l in layers if l != 0] - - for idx in layers: - weight = g_ema.convs[idx].conv.modulation.weight.T - weights.append(weight.cpu().detach().numpy()) - weight = np.concatenate(weights, axis=1).astype(np.float32) - weight = weight / np.linalg.norm(weight, axis=0, keepdims=True) - eigen_values, eigen_vectors = np.linalg.eig(weight.dot(weight.T)) - return layers, eigen_vectors.T, eigen_values diff --git a/spaces/zomehwh/sovits-tannhauser/modules/commons.py b/spaces/zomehwh/sovits-tannhauser/modules/commons.py deleted file mode 100644 index 074888006392e956ce204d8368362dbb2cd4e304..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-tannhauser/modules/commons.py +++ /dev/null @@ -1,188 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -def slice_pitch_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - -def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size) - return ret, ret_pitch, ids_str - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def rand_spec_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = 
sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2,3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1. / norm_type)
-    return total_norm
diff --git a/spaces/zomehwh/sovits-teio/inference_main.py b/spaces/zomehwh/sovits-teio/inference_main.py
deleted file mode 100644
index 3b2c32ac9e29e6b016e656e937fede5d2c23e7e6..0000000000000000000000000000000000000000
--- a/spaces/zomehwh/sovits-teio/inference_main.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import io
-import logging
-import time
-from pathlib import Path
-
-import librosa
-import matplotlib.pyplot as plt
-import numpy as np
-import soundfile
-
-from inference import infer_tool
-from inference import slicer
-from inference.infer_tool import Svc
-
-logging.getLogger('numba').setLevel(logging.WARNING)
-chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
-
-
-
-def main():
-    import argparse
-
-    parser = argparse.ArgumentParser(description='sovits4 inference')
-
-    # Required settings
-    parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='Path to the model')
-    parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='Path to the config file')
-    parser.add_argument('-cl', '--clip', type=float, default=0, help='Forced slice length in seconds; 0 (default) means automatic slicing')
-    parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='List of wav file names, placed under the raw folder')
-    parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='Pitch shift in semitones, positive or negative')
-    parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='Target speaker name(s) for synthesis')
-
-    # Optional settings
-    parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False, help='Automatically predict pitch for speech conversion; do not enable this when converting singing voice or it will go badly out of tune')
-    parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='Path to the clustering model; any value works if no clustering model was trained')
-    parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='Weight of the clustering scheme, range 0-1; keep the default 0 if no clustering model was trained')
-    parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='Crossfade length in seconds between two audio slices; raise it if the voice sounds discontinuous after forced slicing, otherwise keep the default 0')
-    parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='Whether to apply a mean filter (pooling) to F0, which helps with some hoarse segments; note that enabling it slows down inference, off by default')
-
-    # Settings that normally need no change
-    parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='Default -40; use -30 for noisy audio, or -50 to keep breaths in dry vocals')
-    parser.add_argument('-d', '--device', type=str, default=None, help='Inference device; None picks cpu or gpu automatically')
-    parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='Noise scale; affects articulation and audio quality, somewhat arcane')
-    parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='Seconds of silence padding for inference; artifacts appear at the start and end for unknown reasons, and padding with a short silence removes them')
-    parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='Output audio format')
-    parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='Proportion of the crossfade length to keep after discarding the head and tail of each automatic slice, range 0-1 (left-open, right-closed)')
-
-    args = parser.parse_args()
-
-
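-    # Example invocation (model checkpoint and input names are illustrative):
-    #   python inference_main.py -m "logs/44k/G_30400.pth" -c "configs/config.json" \
-    #       -n "song-src.wav" -t 0 -s "nen"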
svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path) - infer_tool.mkdir(["raw", "results"]) - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - clip = args.clip - lg = args.linear_gradient - lgr = args.linear_gradient_retain - F0_mean_pooling = args.f0_mean_pooling - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip*audio_sr) - lg_size = int(lg*audio_sr) - lg_size_r = int(lg_size*lgr) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - audio.extend(list(infer_tool.pad_array(_audio, length))) - continue - if per_size != 0: - datas = infer_tool.split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length - if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, dat, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = svc_model.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - F0_mean_pooling = F0_mean_pooling - ) - _audio = out_audio.cpu().numpy() - pad_len = int(svc_model.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - _audio = infer_tool.pad_array(_audio, per_length) - if lg_size!=0 and k!=0: - lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:] - lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size] - lg_pre = lg1*(1-lg)+lg2*lg - audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size] - audio.extend(lg_pre) - _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:] - audio.extend(list(_audio)) - key = "auto" if auto_predict_f0 else f"{tran}key" - cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}" - res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) - -if __name__ == '__main__': - main()
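-
-# Added note, a toy illustration of the linear crossfade in main() above: with
-# tail `a` from the previous slice, head `b` from the next, and a ramp
-# `w = np.linspace(0, 1, n)`, the blended overlap is `a * (1 - w) + b * w`,
-# which is the `lg1 * (1 - lg) + lg2 * lg` line above.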
| Step | Action |
| --- | --- |
| 1 | Go to https://apkcombo.com/ |
| 2 | Search for "ARK: Survival Evolved" |
| 3 | Select the game and the version |
| 4 | Download the APK file |
| 5 | Enable "Unknown sources" |
| 6 | Install the APK file |
| 7 | Launch the game |