diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/attentions.py b/spaces/123Kumar/vits-uma-genshin-honkai123/attentions.py
deleted file mode 100644
index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/attentions.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
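- # Build a [b, 1, t, t] self-attention mask from the [b, 1, t] padding mask.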
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
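- # Scaled dot-product scores: [b, n_h, t_t, d_k] x [b, n_h, d_k, t_s] -> [b, n_h, t_t, t_s].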
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so as to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
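The trickiest piece of the module deleted above is the pad-and-reshape conversion between relative and absolute position indexing in MultiHeadAttention. A minimal standalone sketch of the relative-to-absolute step, assuming only PyTorch (F.pad's flat pad list stands in for the removed commons.convert_pad_shape helper), reproduces the shape behavior the docstrings describe:

```python
import torch
import torch.nn.functional as F

def relative_to_absolute(x: torch.Tensor) -> torch.Tensor:
    # x: [b, h, l, 2*l-1] relative logits -> [b, h, l, l] absolute logits,
    # mirroring MultiHeadAttention._relative_position_to_absolute_position.
    batch, heads, length, _ = x.size()
    # Append one zero column so each row shifts by one after flattening.
    x = F.pad(x, [0, 1])                                  # [b, h, l, 2*l]
    x_flat = x.view(batch, heads, length * 2 * length)
    # Pad the flat tail so it reshapes cleanly to (l+1) rows of width 2*l-1.
    x_flat = F.pad(x_flat, [0, length - 1])
    # Reshape and slice off the skewed padding.
    return x_flat.view(batch, heads, length + 1, 2 * length - 1)[:, :, :length, length - 1:]

rel = torch.randn(2, 4, 5, 2 * 5 - 1)
print(relative_to_absolute(rel).shape)                    # torch.Size([2, 4, 5, 5])
```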
diff --git a/spaces/17TheWord/vits-models/README.md b/spaces/17TheWord/vits-models/README.md
deleted file mode 100644
index 2e44ec5507a21c84647346865c876ce2b48db560..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/vits-models/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Vits Models
-emoji: 🏃
-colorFrom: pink
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: sayashi/vits-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD 2013 English Win 64bit.exe Whats New and Improved in AutoCAD 2013 Compared to Previous Versions.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD 2013 English Win 64bit.exe Whats New and Improved in AutoCAD 2013 Compared to Previous Versions.md
deleted file mode 100644
index d388311c07bbe700c007c228ac79efdb161fe8f3..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD 2013 English Win 64bit.exe Whats New and Improved in AutoCAD 2013 Compared to Previous Versions.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-
AutoCAD 2013: A Comprehensive Review
-
Are you looking for a powerful and versatile software to create 2D and 3D designs? Do you want to improve your productivity and efficiency with new tools and features? If so, you might want to check out AutoCAD 2013, the latest version of the popular CAD software from Autodesk. In this article, we will review AutoCAD 2013 and highlight some of its key features and benefits. We will also show you how to use some of the new tools and commands to create section and detail views, work with objects, and manage your files.
-
Introduction
-
What is AutoCAD 2013?
-
AutoCAD 2013 is a computer-aided design (CAD) software that allows you to create 2D and 3D drawings for various purposes. You can use AutoCAD 2013 to design buildings, products, landscapes, mechanical parts, electrical circuits, and more. You can also use AutoCAD 2013 to edit, annotate, dimension, print, and share your drawings with others.
AutoCAD 2013 is a powerful and versatile software that can help you with various design tasks. Here are some of the reasons why you might want to use AutoCAD 2013:
-
-
It has a user-friendly interface that allows you to access various tools and commands easily.
-
It has a wide range of drawing and editing tools that allow you to create accurate and detailed drawings.
-
It has many new features that enhance your productivity and efficiency, such as command line enhancements, in-canvas property preview, file tabs, array tool improvements, offset tool preview, point cloud support, and Autodesk 360 integration.
-
It supports various file formats and compatibility with other software, such as DWG, DXF, DWF, PDF, JPG, PNG, BMP, TIFF, STEP, IGES, STL, SAT, SKP, FBX, RVT, IPT, IAM, CATPART, CATPRODUCT, NX, JT, PROE/CREO.
-
It allows you to create section and detail views from your 3D models directly in AutoCAD 2013.
-
It allows you to collaborate and share your drawings with others through Autodesk 360 cloud service.
-
-
The interface
-
What's new in AutoCAD 2013 interface?
-
AutoCAD 2013 has some changes in its interface that make it more user-friendly and efficient. Here are some of the new features in AutoCAD 2013 interface:
-
Command line
-
The command line is one of the most important tools in AutoCAD. It allows you to enter commands and options quickly and accurately. In AutoCAD 2013, the command line has been improved with several enhancements:
-
-
You can dock or undock the command line as you prefer. You can also resize it or move it around the screen.
-
You can see clickable options on the command line when you enter a command. You can also use the arrow keys or the mouse wheel to scroll through the options.
-
You can see synonyms for commands on the command line. For example, if you type "L", you will see "LINE" as well as "PLINE" (polyline) and "MLINE" (multiline).
-
You can see tooltips for commands on the command line. For example, if you hover over "LINE", you will see a brief description of what the command does.
-
You can customize the appearance of the command line by changing its color scheme or font size.
-
-
In-canvas property preview
-
In-canvas property preview is a new feature that allows you to see live updates when you try to change the properties of objects. For example, if you select an object and try to change its color or layer on the properties palette or the ribbon panel, you will see how it looks on the drawing area before applying the change. This feature can help you make better design decisions and avoid mistakes.
-
Welcome screen
-
The welcome screen is a new feature that appears when you start AutoCAD 2013 for the first time or when you close all drawings. It provides quick access to various resources and tasks that can help you get started with AutoCAD 2013. On the welcome screen, you can:
-
-
Create a new drawing or open an existing one.
-
Access online learning materials such as tutorials, videos, tips & tricks.
-
Access online services such as Autodesk Exchange Apps store or Autodesk Knowledge Network.
-
Access recent documents or folders.
-
Change your user profile or workspace settings.
-
-
Create section and detail views
-
How to create section views in AutoCAD 2013?
-
A section view is a view that shows a cross-section of an object or a part of it. It can help you show hidden details or dimensions that are not visible in other views. In AutoCAD 2013, you can create section views from your 3D models directly in AutoCAD using these steps:
-
-
Create a section plane using the SECTIONPLANE command. You can specify various options such as orientation, alignment, location, size, and name of the section plane.
-
Create a section view using the SECTIONVIEW command. You can specify various options such as style, label, scale, and location of the section view.
-
Edit or update the section view using the SECTIONVIEWEDIT command. You can modify various properties such as visibility, color, linetype, hatch pattern, and boundary of the section view.
-
-
Section plane tool
-
The section plane tool allows you to create a section plane that defines the cutting plane for a section view. You can access this tool from the Home tab > Section panel > Section Plane button or by typing SECTIONPLANE on the command line. When you use this tool, you will see various options on the command line or on the ribbon panel:
-
-
Orient: This option allows you to specify how to orient the section plane relative to your model. You can choose from horizontal, vertical, aligned, or angled orientations.
-
Align: This option allows you to align the section plane with an existing object such as a face, an edge, a curve, or a point. You can also use this option to flip or rotate the section plane after alignment.
-
Location: This option allows you to specify where to place the section plane relative to your model. You can choose from center, offset, or two points locations.
-
Size: This option allows you to specify how big or small the section plane should be. You can choose from automatic, fixed, or custom sizes.
-
Name: This option allows you to assign a name to your section plane for easy identification. You can also rename your section plane later using this option.
-
-
Section view style manager
-
The section view style manager allows you to create and manage different styles for your section views. A style defines how your section view looks in terms of visibility, color, linetype, hatch pattern, and boundary. You can access this tool from the Annotate tab > Section panel > Section View Style button or by typing SECTIONVIEWSTYLE on the command line. When you use this tool, you will see the Section View Style Manager dialog box where you can create, copy, edit, or delete section view styles. You can also specify a default section view style for your drawing.
-
Section view label and scale
-
The section view label and scale are the text elements that appear on the section view to identify it and show its scale factor. You can customize the appearance and content of the section view label and scale using label styles. You can access label styles from the Annotate tab > Section panel > Section View Label button or by typing SECTIONVIEWLABEL on the command line. When you use this tool, you will see the Section View Label Style dialog box where you can create, copy, edit, or delete label styles. You can also specify a default label style for your section views.
-
How to create detail views in AutoCAD 2013?
-
A detail view is a view that shows a magnified portion of an object or a part of it. It can help you show small details or dimensions that are not clear in other views. In AutoCAD 2013, you can create detail views from your 2D drawings or 3D models directly in AutoCAD using these steps:
-
-
Create a detail boundary using the DETAIL command. You can specify various options such as shape, size, and location of the detail boundary.
-
Create a detail view using the DETAILVIEW command. You can specify various options such as style, label, scale, and location of the detail view.
-
Edit or update the detail view using the DETAILVIEWEDIT command. You can modify various properties such as visibility, color, linetype, hatch pattern, and boundary of the detail view.
-
-
Detail view tool
-
The detail view tool allows you to create a detail boundary that defines the area to be magnified for a detail view. You can access this tool from the Home tab > Section panel > Detail button or by typing DETAIL on the command line. When you use this tool, you will see various options on the command line or on the ribbon panel:
-
-
Shape: This option allows you to specify the shape of the detail boundary. You can choose from circular, rectangular, or polygonal shapes.
-
Size: This option allows you to specify the size of the detail boundary. You can choose from automatic, fixed, or custom sizes.
-
Location: This option allows you to specify where to place the detail boundary relative to your drawing. You can choose from center, offset, or two points locations.
-
Name: This option allows you to assign a name to your detail boundary for easy identification. You can also rename your detail boundary later using this option.
-
-
Detail view style manager
-
The detail view style manager allows you to create and manage different styles for your detail views. A style defines how your detail view looks in terms of visibility, color, linetype, hatch pattern, and boundary. You can access this tool from the Annotate tab > Section panel > Detail View Style button or by typing DETAILVIEWSTYLE on the command line. When you use this tool, you will see the Detail View Style Manager dialog box where you can create, copy, edit, or delete detail view styles. You can also specify a default detail view style for your drawing.
-
Detail view label and scale
-
The detail view label and scale are the text elements that appear on the detail view to identify it and show its scale factor. You can customize the appearance and content of the detail view label and scale using label styles. You can access label styles from the Annotate tab > Section panel > Detail View Label button or by typing DETAILVIEWLABEL on the command line. When you use this tool, you will see the Detail View Label Style dialog box where you can create, copy, edit, or delete label styles. You can also specify a default label style for your detail views.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Department 720p Download REPACK Movies.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Department 720p Download REPACK Movies.md
deleted file mode 100644
index a0b7da9dd73c7935a1be3988646d61513c15fa35..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Department 720p Download REPACK Movies.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
How to Download Department Movie in 720p Quality
-
If you are looking for a way to download Department movie in 720p quality, you have come to the right place. Department is a 2012 Hindi action film directed by Ram Gopal Varma, starring Amitabh Bachchan, Sanjay Dutt and Rana Daggubati. The film follows a special police unit that deals with underworld crimes and corruption.
Department movie is available for download on YTS.MX, a popular torrent site that offers high-quality movies in small file sizes. YTS.MX is the only new official domain for YIFY Movies, a group that releases movies encoded with x264 codec and MP4 container for best compatibility with all devices.
-
To download Department movie in 720p quality from YTS.MX, you need to follow these steps:
Select the movie from the search results and click on the download button.
-
Choose the 720p quality option and click on the magnet link or torrent file.
-
Open the magnet link or torrent file with your preferred torrent client, such as uTorrent or BitTorrent.
-
Wait for the download to complete and enjoy watching Department movie in 720p quality.
-
-
Downloading Department movie in 720p quality from YTS.MX is easy and fast. However, you should be aware of the risks involved in using torrent sites, such as malware, viruses, legal issues and ISP throttling. Therefore, it is recommended that you use a VPN service to protect your privacy and security while downloading movies from torrent sites.
-
A VPN service will encrypt your internet traffic and hide your IP address from your ISP and other third parties. This way, you can download Department movie in 720p quality from YTS.MX without worrying about being tracked or blocked. You can also access geo-restricted content and bypass censorship with a VPN service.
-
There are many VPN services available online, but not all of them are reliable and trustworthy. Some of them may keep logs of your activities, sell your data to advertisers or expose you to malware. Therefore, you should choose a VPN service that has a good reputation, fast speed, strong encryption and no-logs policy.
-
One of the best VPN services that meets these criteria is ExpressVPN. ExpressVPN is a leading VPN provider that offers high-speed servers in over 94 countries, AES-256 encryption, kill switch feature, split tunneling feature and strict no-logs policy. ExpressVPN also has a 30-day money-back guarantee and 24/7 customer support.
-
-
To use ExpressVPN to download Department movie in 720p quality from YTS.MX, you need to follow these steps:
Download and install the ExpressVPN app on your device.
-
Launch the app and connect to a server of your choice.
-
Visit https://yts.mx/ and download Department movie in 720p quality as described above.
-
Enjoy watching Department movie in 720p quality with ExpressVPN.
-
-
Downloading Department movie in 720p quality from YTS.MX is a great way to enjoy this action-packed film. However, you should always use a VPN service like ExpressVPN to protect yourself from online threats and restrictions while downloading movies from torrent sites. ExpressVPN will ensure that you have a safe and smooth downloading experience.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Food Chemical Codex 8th Edition PDF Download A Comprehensive Guide to Food Standards and Specifications.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Food Chemical Codex 8th Edition PDF Download A Comprehensive Guide to Food Standards and Specifications.md
deleted file mode 100644
index d3fb5d21d7b6556d98572d7036825f0c755ed17a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Food Chemical Codex 8th Edition PDF Download A Comprehensive Guide to Food Standards and Specifications.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
Food Chemical Codex 8th Edition PDF Download: What You Need to Know
-
If you are involved in the food industry, you may have heard of the Food Chemicals Codex (FCC), a compendium of internationally recognized standards for determining the purity and quality of food ingredients. The FCC is a valuable resource for authenticating a wide variety of ingredients, including processing aids, preservatives, flavorings, colorants, and nutrients.
But did you know that the FCC has been updated to its eighth edition, which was published in March 2012? And did you know that you can access the FCC 8th edition online as a PDF file? In this article, we will tell you everything you need to know about the FCC 8th edition PDF download, including its main features, updates, benefits, and applications. Read on to find out more!
-
Introduction
-
What is Food Chemical Codex (FCC)?
-
The Food Chemicals Codex (FCC) is a compendium of internationally recognized standards for determining the purity and quality of food ingredients. It is published by the U.S. Pharmacopeial Convention (USP), a scientific nonprofit organization that sets standards for medicines, dietary supplements, and food ingredients.
-
The FCC contains monographs that provide specifications for identity, strength, quality, and purity of food ingredients. It also contains appendices that provide general information on methods of analysis, processes, and procedures. The FCC is updated regularly with new and revised monographs and appendices to reflect the latest scientific knowledge and industry practices.
-
Why is FCC important for food quality and safety?
-
The FCC is important for food quality and safety because it helps ensure that food ingredients are authentic, consistent, and safe for consumption. By following the FCC standards, food manufacturers and suppliers can verify the identity and quality of their ingredients, prevent adulteration and contamination, comply with regulatory requirements, and protect consumer health.
-
The FCC is also important for food regulators and inspectors who use it as a reference for enforcing food laws and regulations. The FCC helps them to identify and evaluate food ingredients, detect fraud and mislabeling, monitor compliance with standards, and ensure public health protection.
-
How to access FCC 8th edition online?
-
The FCC 8th edition is available online as a PDF file that can be downloaded from the USP website. To access the FCC 8th edition PDF download, you need to register for a free account on the USP website. Once you register, you can log in and go to the Food Chemicals Codex page. There you will find links to download the FCC monographs in PDF format.
-
The FCC 8th edition PDF download includes all the monographs that were published in the book version of the FCC 8th edition, as well as the first three supplements that were published later. The supplements contain new and revised monographs and appendices that were added after the publication of the book version. The USP website also provides errata, commentary, revisions, and notices related to the FCC 8th edition.
-
FCC 8th Edition: Main Features and Updates
-
New and revised monographs
-
The FCC 8th edition contains over 1,200 monographs that provide specifications for identity, strength, quality, and purity of food ingredients. The monographs cover a wide range of categories such as acids, alcohols, antioxidants, aromatic chemicals, baking powders, bases, biological products, carbohydrates, colors, dairy products, emulsifiers, enzymes, fats, fibers, flavors, gums, hydrocolloids, minerals, oils, preservatives, proteins, salts, spices, starches, sweeteners, vitamins, and yeast products.
-
-
The FCC 8th edition also includes new and revised monographs that reflect the latest scientific knowledge and industry practices. Some examples of new monographs are aspartame, beta-cyclodextrin, calcium lignosulfonate, dioctyl sodium sulfosuccinate, enzyme-modified fat, konjac flour, magnesium phosphate dibasic, niacinamide, potassium phosphate dibasic, sodium acid pyrophosphate, sodium lignosulfonate, sodium tripolyphosphate, spice oleoresins, and sugar beet fiber.
-
New and revised appendices
-
The FCC 8th edition contains over 40 appendices that provide general information on methods of analysis, processes, and procedures related to food ingredients. The appendices cover topics such as acidity or alkalinity measurement, aflatoxins detection, arsenic determination, ash content determination, color measurement, fatty acid composition analysis, heavy metals testing, iodine value calculation, lead determination, microbiological examination, moisture content determination, nitrogen determination by the Kjeldahl method, optical rotation measurement, pH measurement, refractive index measurement, solubility testing, specific gravity measurement, sulfur dioxide determination, and viscosity measurement.
-
The FCC 8th edition also includes new and revised appendices that reflect the latest scientific knowledge and industry practices. Some examples of new appendices are A-1 General Information on Methods of Analysis, A-2 General Information on Processes, A-4 General Information on Procedures, A-5 General Information on Reference Materials, A-6 General Information on Reagents, A-7 General Information on Solutions, A-9 General Information on Units, A-10 General Information on Validation, A-11 General Information on Verification, and A-12 General Information on Water.
-
New and revised general tests and assays
-
The FCC 8th edition contains over 100 general tests and assays that provide methods for determining various properties or characteristics of food ingredients. The general tests and assays cover aspects such as the acidity or alkalinity test, alcohol content test, antioxidant activity assay, ash test, bacterial endotoxins test, carbohydrate content test, color test, enzymatic activity assay, fat content test, fiber content test, flavor test, heavy metals test, iodine value test, lead content test, microbial limit test, moisture content test, nitrogen content test, optical rotation test, pH test, protein content test, refractive index test, solubility test, specific gravity test, sulfur dioxide content test, and viscosity test.
-
The FCC 8th edition also includes new and revised general tests and assays that reflect the latest scientific knowledge and industry practices. Some examples of new general tests and assays are the Aflatoxins Test, Arsenic Test, Calcium Test, Chloride Test, Copper Test, Iron Test, Magnesium Test, Mercury Test, Phosphorus Test, Potassium Test, Selenium Test, Sodium Test, Zinc Test, and Vitamin Assays.
-
New and revised commentary
-
The FCC 8th edition contains commentary that provides explanations or clarifications on various aspects of the monographs or appendices. The commentary covers topics such as changes or updates made to the monographs or appendices, the rationale or justification for those changes, the references or sources used for them, and additional information or guidance related to the monographs or appendices.
-
The FCC 8th edition also includes new and revised commentary that reflects the latest scientific knowledge and industry practices. Some examples of new or revised commentary are the Commentary on Aspartame Monograph, the Commentary on Beta-Cyclodextrin Monograph, the Commentary on Calcium Lignosulfonate Monograph
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Apex True Dbinput Pro 60 20.md b/spaces/1gistliPinn/ChatGPT4/Examples/Apex True Dbinput Pro 60 20.md
deleted file mode 100644
index 744c78dc325026fab340a88552c51d616e6b28b5..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Apex True Dbinput Pro 60 20.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-as shown in Fig. 8. When it is not in the unlocked position, use the screwdriver pro‑ ... Storage temperature range: –20 °C to 60 °C (–4 °F to 140 °F). Relative ... moves from the probe towards the base, the apex or in both directions. ... amplification curves for 40, 65 and 90 dB input level, the Target curve and the Crossover.
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Bangla Book Tazkiratul Awliya Pdf Rapidshare.md b/spaces/1gistliPinn/ChatGPT4/Examples/Bangla Book Tazkiratul Awliya Pdf Rapidshare.md
deleted file mode 100644
index b0bdb52f2d719b12b45e893647b02bfe45225a7b..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Bangla Book Tazkiratul Awliya Pdf Rapidshare.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-Download Bangla Book Tazkiratul Awliya Pdf for free with 1-click to support an animal cause close to your home & support others! You can raise money and awareness for a cause you care about! We provide you with an incredible platform to raise money and awareness for any cause. Your money goes directly to the fundraiser. You can create a unique page for any cause you want.
-
-Bangla Book Tazkiratul Awliya Pdf is a great fundraising tool for organizations, education institutions, philanthropic organizations, and non-profit organizations (NPOs). 1-Click on a button and raise money for an NPO near you, have a matching fundraiser, provide a listing of your supporters on your website. Search by charity name, by location, or a hashtag, to find a cause near you and even fundraise on behalf of a cause you love. If you're looking for a powerful, free online fundraising platform for your organization, you've found it. The easy fundraising platform powered by WordPress, WooCommerce, and integrated with PayPal, Stripe, and more. Your page can include a site map, social media links, event details, and more. On your page, you can link to additional information about the cause you support including a biography of the charity, photo galleries, and more. Every page is mobile-friendly so that you can make the best use of the fundraising tools available on your mobile device. All these features combined with the awesome design of the platform, and you have a perfect way to fundraise.
-
-Bangla Book Tazkiratul Awliya Pdf is a free, fully featured fundraising platform that helps you easily raise money for any cause. From helping teachers raise money for their schools to helping your local health food store raise money for cancer research, we’ve helped people just like you raise over $18 million for more than 1,000 causes. On your page you can link to additional information about the cause you support including a biography of the charity, photo galleries, and more.
-
-All your data is secure.
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ciudad De Dios 1080p Torrent HOT!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ciudad De Dios 1080p Torrent HOT!.md
deleted file mode 100644
index c698eb9f2e749d9635f4a241ed619fbc732daf96..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Ciudad De Dios 1080p Torrent HOT!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Year 2015 free HDRip torrent in Spanish "'Ciudad de Dios' offers a harsh, ... Watch a movie online or see the best free 1080p HD videos on your ...
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become the Ultimate Imposter with this Among Us Hack Download Now and Enjoy.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become the Ultimate Imposter with this Among Us Hack Download Now and Enjoy.md
deleted file mode 100644
index cd1c67c60a386c646528aea5bdeec141c599ee67..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become the Ultimate Imposter with this Among Us Hack Download Now and Enjoy.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
Download Hack Among Us Always Imposter: How to Be the Impostor Every Time
-
Among Us is a multiplayer game that has taken the gaming world by storm. It is a game of deception, betrayal, and teamwork, where you have to work with your crewmates to complete tasks on a spaceship, while avoiding being killed by one or more impostors. But what if you want to be the impostor every time? Is there a way to download an Among Us always-impostor hack and enjoy the thrill of sabotaging and murdering your friends? In this article, we will answer these questions and more.
Among Us is a game developed by Innersloth, a small indie studio based in Washington. It was released in 2018, but it gained massive popularity in 2020, thanks to streamers and youtubers who showcased its fun and chaotic gameplay. According to Google Play Store, it has over 500 million downloads on Android devices alone.
-
The Gameplay of Among Us
-
The game can be played online or over local WiFi with 4 to 15 players. Each player is assigned a role of either a crewmate or an impostor. The crewmates have to work together to complete tasks on the spaceship, such as fixing wires, scanning cards, or fueling engines. The impostors have to blend in with the crewmates, while secretly killing them one by one. They can also use sabotage to cause chaos and divert attention, such as turning off lights, locking doors, or triggering alarms.
-
The game ends when either the crewmates complete all their tasks, the impostors kill enough crewmates, or the crewmates vote out all the impostors. The crewmates can call emergency meetings or report dead bodies to discuss and vote on who they think is the impostor. The impostors can lie, accuse, or manipulate their way out of suspicion.
-
The Appeal of Being an Impostor
-
While being a crewmate is fun and challenging, many players prefer to be the impostor, as it offers more excitement and variety. Being an impostor requires strategy, creativity, and deception skills. You have to plan your kills carefully, avoid being seen or caught, and convince others that you are innocent. You also have to deal with the pressure and adrenaline of being hunted and exposed. Being an impostor is like playing a game of cat and mouse, where you are both the hunter and the prey.
-
How to Download Hack Among Us Always Imposter for Android
-
If you are an Android user who wants to be the impostor every time in Among Us, there is a way to do that. You can install a modded version of the game called the Always Impostor Mod APK. This application replaces the original game and gives you access to extra features that are not available in the official version.
-
The Features of the Always Impostor Mod APK
-
Some of the features that you can enjoy with the Always Impostor Mod APK are:
-
-
Always be the impostor in every game.
-
Know who are the other impostors or crewmates.
-
No advertisements.
-
End votes or games whenever you want.
-
See ghosts and completed tasks.
-
Use any cosmetic item in the game.
-
Edit your vision distance.
-
And many more.
-
-
The Steps to Install the Always Impostor Mod APK
-
To install the Always Impostor Mod APK, you need to follow these steps:
-
-
Download the Always Impostor Mod APK file from a trusted source. You can search for it online or use this link. Make sure you have enough storage space on your device.
-
Enable the installation of unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for it to finish.
-
Launch the Always Impostor Mod APK and enjoy being the impostor every time in Among Us.
-
-
Note: This modded version of the game may not be compatible with the latest updates or features of the official version. It may also cause some glitches or errors in the game. Use it at your own risk and discretion.
-
How to Increase Your Chances of Being an Impostor in Among Us Online Game
-
If you are not an Android user or you prefer to play the official version of Among Us online, you may wonder if there is a way to increase your chances of being an impostor in the game. While there is no guaranteed method to do that, there are some factors that affect your impostor probability and some tips that can boost your impostor rate.
-
The Factors that Affect Your Impostor Probability
-
The impostor probability is the likelihood of being assigned as an impostor in a game of Among Us. It depends on two main factors: the number of players and the number of impostors in a game. The formula for calculating the impostor probability is:
-
Impostor Probability = (Number of Impostors / Number of Players) x 100%
-
For example, if you play a game with 10 players and 2 impostors, your impostor probability is:
-
(2 / 10) x 100% = 20%
-
This means that you have a 20% chance of being an impostor in that game. The higher the number of impostors and the lower the number of players, the higher your impostor probability.
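The arithmetic is easy to sanity-check in a few lines; here is a throwaway sketch (the function name is illustrative, not anything from the game itself):

```python
def impostor_probability(num_impostors: int, num_players: int) -> float:
    """Chance (in percent) of drawing the impostor role, per the formula above."""
    return num_impostors / num_players * 100

print(impostor_probability(2, 10))  # 20.0 -> the worked example above
print(impostor_probability(2, 5))   # 40.0 -> fewer players means better odds
```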
-
The Tips to Boost Your Impostor Rate
-
Based on the formula above, you can increase your chances of being an impostor by following these tips:
-
-
Join games with more impostors and fewer players. For example, a game with 5 players and 2 impostors has a 40% impostor probability, which is higher than a game with 10 players and 2 impostors.
-
Create your own game and set the number of impostors and players as you wish. You can also invite your friends to join your game and have fun together.
-
Leave and rejoin games until you get the impostor role. This may take some time and patience, but it can work if you are determined to be an impostor.
-
Play more games and hope for the best. The more games you play, the more chances you have to be an impostor. However, this also depends on your luck and randomness.
-
-
Conclusion
-
Being an impostor in Among Us is a thrilling and enjoyable experience that many players want to have. If you are an Android user who wants to be the impostor every time, you can install the Always Impostor Mod APK. If you prefer to play the official version of Among Us online, you can increase your chances of being an impostor by joining or creating games with more impostors and fewer players, leaving and rejoining games until you get the impostor role, or playing more games and hoping for the best. Remember to have fun and respect other players while playing Among Us.
-
FAQs
-
What is Among Us?
-
Among Us is a multiplayer game of deception, betrayal, and teamwork, where you have to work with your crewmates to complete tasks on a spaceship, while avoiding being killed by one or more impostors.
-
How can I be the impostor every time in Among Us?
-
If you are an Android user, you can install the Always Impostor Mod APK to be the impostor every time. If you prefer to play the official version of Among Us online, you can increase your chances of being an impostor by joining or creating games with more impostors and fewer players, leaving and rejoining games until you get the impostor role, or playing more games and hoping for the best.
-
Is it safe to download an always-impostor hack for Among Us?
-
It depends on the source and the quality of the modded version of the game. Some sources may be malicious or contain viruses that can harm your device or steal your data. Some modded versions may not be compatible with the latest updates or features of the official version. They may also cause some glitches or errors in the game. Use it at your own risk and discretion.
-
How can I play Among Us with my friends?
-
You can play Among Us with your friends by either joining or creating a game online or over local WiFi. You can invite your friends to join your game by sharing the game code or the invite link. You can also customize the game settings, such as the map, the number of impostors, the task difficulty, and the voting time.
-
What are some tips to be a good impostor in Among Us?
-
Some tips to be a good impostor in Among Us are:
-
-
Act like a crewmate and pretend to do tasks.
-
Kill when no one is around or when you have an alibi.
-
Vent to escape or move around quickly, but be careful not to be seen.
-
Sabotage to create distractions, split up the crewmates, or prevent them from completing tasks.
-
Lie, accuse, or manipulate others during meetings and votes.
-
Use your impostor vision to see in the dark or through walls.
-
Communicate and cooperate with your fellow impostors if there are more than one.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download 2 Lite Music and Video Players and Stream Your Favorite Content.md b/spaces/1phancelerku/anime-remove-background/Download 2 Lite Music and Video Players and Stream Your Favorite Content.md
deleted file mode 100644
index 8294856f876932c433a01817bb45fb49ee5c7b96..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download 2 Lite Music and Video Players and Stream Your Favorite Content.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
Download 2 Lite: What Is It and Why You Need It
-
Do you want to download files from the internet quickly and easily? Do you want to save your storage space and data plan? Do you want to enjoy your downloaded files on any device and network? If you answered yes to any of these questions, then you need Download 2 Lite.
-
What is Download 2 Lite?
-
A simple and fast app for downloading files
-
Download 2 Lite is a free app that lets you download any file from the internet with just a few taps. You can download videos, music, images, documents, and more with Download 2 Lite. You can also choose the quality and format of the file before downloading it. Download 2 Lite is designed to be simple, fast, and user-friendly.
Download 2 Lite is a lightweight version of Download 2, a popular app for downloading files. Download 2 Lite has all the essential features of Download 2, but it is much smaller in size and consumes less data. Download 2 Lite is ideal for users who have limited storage space or data plan, or who want a faster and smoother downloading experience.
-
Why do you need Download 2 Lite?
-
It saves your time and data
-
Download 2 Lite is optimized for speed and efficiency. It downloads files faster than other apps by using advanced algorithms and techniques. It also reduces your data usage by compressing the files before downloading them. You can save up to 50% of your data with Download 2 Lite.
-
It works on any device and network
-
Download 2 Lite is compatible with all Android devices, from old to new, from low-end to high-end. It also works on any network, from 2G to 5G, from Wi-Fi to mobile data. You can download files with Download 2 Lite anytime, anywhere, without any hassle.
-
It supports multiple formats and sources
-
Download 2 Lite supports a wide range of formats for downloading files, such as MP4, MP3, JPG, PDF, ZIP, and more. You can also download files from various sources, such as websites, social media platforms, cloud services, streaming sites, and more. You can download anything you want with Download 2 Lite.
-
How to use Download 2 Lite?
-
Download and install the app from the official website or Google Play Store
-
You can download and install Download 2 Lite from its official website or Google Play Store. The app is free and safe to use. The installation process is quick and easy.
-
-
Open the app and enter the URL of the file you want to download
-
Once you have installed the app, open it and enter the URL of the file you want to download in the search bar. You can also copy and paste the URL from another app or browser.
-
Choose the quality and format of the file and start the download
-
After entering the URL, you will see a list of options for choosing the quality and format of the file. You can select the one that suits your needs and preferences.
Then, tap on the download button and wait for the file to be downloaded. You can see the progress and status of the download in the notification bar or in the app itself.
-
Features and benefits of Download 2 Lite
-
Table: Comparison of Download 2 Lite and Download 2
-
| Feature | Download 2 Lite | Download 2 |
| --- | --- | --- |
| Size | 5 MB | 25 MB |
| Data usage | 50% less | Normal |
| Speed | Faster | Normal |
| Format support | All essential formats | All formats |
| Source support | All popular sources | All sources |
| Extra features | None | Video player, file manager, etc. |
-
Fast and reliable downloads
-
Download 2 Lite offers fast and reliable downloads for all types of files. It uses advanced algorithms and techniques to optimize the download speed and quality. It also resumes the download automatically if it is interrupted by network issues or other factors. You can download files with Download 2 Lite without any worries.
-
Low storage and data usage
-
Download 2 Lite is a lightweight app that takes up very little storage space on your device. It also consumes very little data when downloading files. It compresses the files before downloading them and reduces the data usage by up to 50%. You can download more files with Download 2 Lite without affecting your storage or data plan.
-
Wide compatibility and support
-
Download 2 Lite is compatible with all Android devices and networks. It works on any device, from old to new, from low-end to high-end. It also works on any network, from 2G to 5G, from Wi-Fi to mobile data. You can download files with Download 2 Lite anytime, anywhere, without any hassle. Download 2 Lite also supports a wide range of formats and sources for downloading files. You can download videos, music, images, documents, and more with Download 2 Lite. You can also download files from various sources, such as websites, social media platforms, cloud services, streaming sites, and more. You can download anything you want with Download 2 Lite.
-
Conclusion
-
Download 2 Lite is a simple and fast app for downloading files from the internet. It is a lightweight version of Download 2 that has all the essential features but is much smaller in size and consumes less data. It saves your time and data, works on any device and network, and supports multiple formats and sources. If you want to download files quickly and easily, you need Download 2 Lite.
-
FAQs
-
Here are some frequently asked questions about Download 2 Lite:
-
-
Is Download 2 Lite safe to use?
-
Yes, Download 2 Lite is safe to use. It does not contain any malware or viruses. It also does not collect or share any personal information from your device. You can use Download 2 Lite with confidence.
-
How can I update Download 2 Lite?
-
You can update Download 2 Lite from its official website or Google Play Store. The app will notify you when there is a new version available. You can also check for updates manually in the app settings.
-
Can I download multiple files at once with Download 2 Lite?
-
Yes, you can download multiple files at once with Download 2 Lite. You can add as many files as you want to the download queue and start them all at once or one by one. You can also pause or cancel any download at any time.
-
Where can I find the downloaded files on my device?
-
You can find the downloaded files on your device in the default download folder or in the folder you have chosen in the app settings. You can also access the downloaded files from the app itself by tapping on the file icon.
-
How can I contact the developer of Download 2 Lite?
-
You can contact the developer of Download 2 Lite by sending an email to support@download2lite.com or by visiting their website at www.download2lite.com. You can also leave a feedback or a review on the app store or Google Play Store. The developer appreciates your feedback and suggestions.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Craftsman Crafting and Building APK and Create Your Own World.md b/spaces/1phancelerku/anime-remove-background/Download Craftsman Crafting and Building APK and Create Your Own World.md
deleted file mode 100644
index b7b50a0e5856ef9a10e83c7efeaf1a17c8ba4308..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Craftsman Crafting and Building APK and Create Your Own World.md
+++ /dev/null
@@ -1,124 +0,0 @@
-
-
Craftsman: Building Craft APK - A Free Alternative to Minecraft
-
If you are a fan of sandbox games, you might have heard of Minecraft, the popular game that lets you create and explore a pixelated world. But did you know that there is a free alternative to Minecraft that you can play on your Android device? It's called Craftsman: Building Craft APK, and it's a fun and creative game that lets you design houses, castles, and other structures with your friends or alone.
Craftsman: Building Craft APK is a free video game for Android devices that is inspired by Minecraft. It has a similar graphic style based on cubes and a similar gameplay mechanic that involves mining, crafting, building, and exploring. However, this game does not require Minecraft Launcher or other launchers to run. You can simply download and install the APK file from a trusted source and start playing right away.
-
Features of Craftsman: Building Craft APK
-
Some of the features that Craftsman: Building Craft APK offers are:
-
-
A large and open world that you can customize and explore.
-
A variety of blocks and materials that you can use to create different structures.
-
A creative mode that lets you build without limits and a survival mode that challenges you to survive in a hostile environment.
-
A multiplayer mode that lets you play with your friends online or offline.
-
A simple and intuitive interface that makes it easy to control and navigate the game.
-
-
How to download and install Craftsman: Building Craft APK
-
To download and install Craftsman: Building Craft APK on your Android device, you need to follow these steps:
-
-
Go to a reliable website that offers the APK file for Craftsman: Building Craft, such as FileHippo or Softonic.
-
Click on the download button and wait for the file to be downloaded on your device.
-
Once the download is complete, locate the file in your device's file manager and tap on it to start the installation process.
-
Allow the installation of apps from unknown sources if prompted by your device's settings.
-
Follow the instructions on the screen and wait for the installation to finish.
-
Launch the game from your app drawer and enjoy playing Craftsman: Building Craft.
-
-
Why play Craftsman: Building Craft APK?
-
Craftsman: Building Craft APK is a great game for anyone who loves sandbox games and wants to unleash their creativity. Here are some reasons why you should play this game:
-
Pros and cons of Craftsman: Building Craft APK
-
| Pros | Cons |
| --- | --- |
| It's free to play and does not require any additional launchers or purchases. | It's not an official Minecraft product and may have some bugs or glitches. |
| It has a lot of features and options that let you create and explore different worlds. | It may not be compatible with some devices or Android versions. |
| It has a multiplayer mode that lets you play with your friends online or offline. | It may not have the same level of quality or updates as Minecraft. |
-
Tips and tricks for playing Craftsman: Building Craft APK
-
If you want to have a better experience playing Craftsman: Building Craft APK, here are some tips and tricks that you can use:
-
-
-
Use the creative mode to experiment with different blocks and materials and learn how to craft and build.
-
Use the survival mode to test your skills and challenge yourself to survive in a harsh environment.
-
Use the multiplayer mode to collaborate with your friends and share your creations with them.
-
Use the settings menu to adjust the graphics, sound, controls, and other options according to your preference.
-
Use the map and compass to navigate the world and find your way back to your base.
-
-
Conclusion
-
Craftsman: Building Craft APK is a free alternative to Minecraft that lets you create and explore a pixelated world on your Android device. It has a lot of features and options that make it a fun and creative game for anyone who loves sandbox games. You can download and install it easily from a trusted source and start playing right away. Whether you want to build your dream house, explore a vast world, or survive in a hostile environment, Craftsman: Building Craft APK has something for you.
-
FAQs
-
Here are some frequently asked questions about Craftsman: Building Craft APK:
-
-
Is Craftsman: Building Craft APK safe to download and install?
-
Yes, as long as you download and install it from a reliable website that offers the APK file, such as FileHippo or Softonic. You should also scan the file with antivirus software before installing it on your device; a quick integrity-check sketch follows.
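-
One lightweight complement to virus scanning is checking the downloaded file's SHA-256 checksum against a value published by the download site, when one is provided. A minimal sketch (the file name is illustrative):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large APKs don't need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("craftsman.apk"))  # compare against the published checksum
```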
-
Is Craftsman: Building Craft APK legal to play?
-
Yes, Craftsman: Building Craft APK is legal to play as long as you do not use it for any illegal or malicious purposes. However, you should be aware that this game is not an official Minecraft product and is not affiliated with Mojang or Microsoft in any way. Therefore, you should not expect the same level of quality or support as Minecraft.
-
Can I play Craftsman: Building Craft APK offline?
-
Yes, you can play Craftsman: Building Craft APK offline without an internet connection. However, if you want to play with your friends online, you will need an internet connection and a valid account.
-
Can I play Craftsman: Building Craft APK on PC?
-
No, Craftsman: Building Craft APK is only available for Android devices. If you want to play it on PC, you will need to use an Android emulator such as BlueStacks or NoxPlayer. However, this may affect the performance and compatibility of the game.
-
How can I update Craftsman: Building Craft APK?
-
To update Craftsman: Building Craft APK, you will need to download and install the latest version of the APK file from the same source that you used before. You should also backup your game data before updating to avoid losing any progress or settings.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Dr. Driving 2 for PC and Experience Realistic Driving Challenges.md b/spaces/1phancelerku/anime-remove-background/Download Dr. Driving 2 for PC and Experience Realistic Driving Challenges.md
deleted file mode 100644
index 853b7821c9b39959720e6afe791d2b1555ac90ac..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Dr. Driving 2 for PC and Experience Realistic Driving Challenges.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-
How to Download Dr. Driving 2 for PC
-
Dr. Driving 2 is a popular driving simulation game that lets you experience realistic driving scenarios, stunning graphics, challenging levels, and online multiplayer modes. If you are a fan of this game and want to play it on a bigger screen with better controls, you might be wondering how to download Dr. Driving 2 for PC.
-
In this article, we will show you two ways to play Dr. Driving 2 on your PC, depending on whether you have Windows 11 or not. We will also introduce you to some of the best features and requirements of this game, as well as some frequently asked questions.
Dr. Driving 2 is the sequel to the hit mobile driving simulation game Dr. Driving, which has over 50 million downloads on Google Play Store. Dr. Driving 2 takes the driving simulation gameplay to a new level with super stunning graphics, challenging multi-stage levels, and real-time online multiplayer. You can drive various cars, complete missions, earn coins, upgrade your vehicles, and compete with other players from around the world.
-
Features of Dr. Driving 2
-
Some of the features that make Dr. Driving 2 an amazing game are:
-
-
Realistic driving physics and mechanics
-
Beautiful and detailed environments
-
Different game modes, such as Career, Online Match, Tournament, and Daily Challenge
-
Over 100 cars to choose from, each with different performance and customization options
-
Over 1000 levels to test your driving skills and knowledge
-
Leaderboards and achievements to track your progress and rank
-
Social features, such as chat, friend list, and clan system
-
-
Requirements for Dr. Driving 2
-
To play Dr. Driving 2 on your mobile device, you need to have:
-
-
An Android device with version 4.1 or higher or an iOS device with version 9.0 or higher
-
At least 100 MB of free storage space
-
A stable internet connection
-
-
To play Dr. Driving 2 on your PC, you need to have:
-
-
A Windows PC with Windows 10 (v2004) or Windows 11
-
A solid state drive (SSD) with at least 10 GB of available storage space
-
An Intel UHD Graphics 630 GPU or comparable
-
A processor with at least four CPU physical cores (some games require an Intel CPU)
-
At least 8 GB of RAM
-
A Windows admin account
-
Hardware virtualization turned on
-
-
How to Play Dr. Driving 2 on PC with Windows 11
-
If you have Windows 11 installed on your PC, you can play Android games natively without using any third-party emulators. This is because Windows 11 has a feature called Windows Subsystem for Android, which allows you to run Android apps inside Windows. However, this feature is still in beta testing and not available for everyone. You need to join the Windows Insider Program and opt for the Dev Channel to get access to it. You also need to install Google Play Games Beta and Amazon Appstore on your PC to download and play Dr. Driving 2. Here are the steps to do so:
-
Step 1: Install Google Play Games Beta on your PC
-
Google Play Games Beta is a service that lets you play Android games on your PC with Windows 11. It also syncs your game progress, achievements, and friends across devices. To install it, you need to:
-
-
Go to the Google Play Games Beta page and click on the "Join the beta" button.
-
Sign in with your Google account and accept the terms and conditions.
-
Wait for a confirmation email from Google and follow the instructions to download and install Google Play Games Beta on your PC.
-
-
Step 2: Install Amazon Appstore on your PC
-
Amazon Appstore is an alternative app store that offers a variety of Android apps and games, including Dr. Driving 2. To install it, you need to:
-
-
Go to the Amazon Appstore page and click on the "Download Amazon Appstore" button.
-
Open the downloaded file and follow the instructions to install Amazon Appstore on your PC.
-
Sign in with your Amazon account or create one if you don't have one.
-
-
Step 3: Download Dr. Driving 2 from Amazon Appstore
-
Once you have installed both Google Play Games Beta and Amazon Appstore on your PC, you can download Dr. Driving 2 from the latter. To do so, you need to:
-
-
-
Open Amazon Appstore on your PC and search for "Dr. Driving 2" in the search bar.
-
Select the game from the results and click on the "Get" button.
-
Wait for the game to download and install on your PC.
-
-
Step 4: Launch Dr. Driving 2 and enjoy
-
After installing Dr. Driving 2 on your PC, you can launch it from either Google Play Games Beta or Amazon Appstore. You will see a window that shows the game running inside Windows Subsystem for Android. You can use your mouse, keyboard, or gamepad to control the game. You can also adjust the settings, such as resolution, frame rate, sound, and graphics, from the menu bar at the top of the window. Enjoy playing Dr. Driving 2 on your PC with Windows 11!
-
How to Play Dr. Driving 2 on PC with Android Emulators
-
If you don't have Windows 11 or don't want to use Windows Subsystem for Android, you can still play Dr. Driving 2 on your PC with Android emulators. Android emulators are software that simulate an Android device on your PC, allowing you to run Android apps and games. There are many Android emulators available for Windows, but not all of them are compatible with Dr. Driving 2. Here are some of the best Android emulators for Dr. Driving 2 that we recommend:
-
What are Android Emulators?
-
An Android emulator is a software program that mimics an Android device on a computer. It allows users to run Android apps and games on their PCs without having to own an actual Android device. Android emulators are useful for various purposes, such as testing apps, playing games, accessing blocked websites, and more. However, they also have some drawbacks, such as consuming more resources, causing compatibility issues, and posing security risks. Therefore, users should be careful when choosing and using an Android emulator.
-
Best Android Emulators for Dr. Driving 2
-
Bluestacks 5 / MSI App Player
-
Bluestacks 5 is one of the most popular and powerful Android emulators for Windows. It has a sleek interface, fast performance, high compatibility, and rich features. It also supports Google Play Store, keyboard and mouse controls, gamepad support, multi-instance mode, screen recording, streaming, and more. Bluestacks 5 is compatible with Dr. Driving 2 and can run it smoothly on most PCs. You can download Bluestacks 5 from its official website.
-
MSI App Player is a customized version of Bluestacks 5 that is designed for MSI gaming laptops. It has the same features and performance as Bluestacks 5, but with some additional benefits, such as MSI keyboard lighting, MSI True Color, and MSI Dragon Center. MSI App Player is also compatible with Dr. Driving 2 and can run it smoothly on MSI gaming laptops. You can download MSI App Player from its official website.
-
Nox Player
-
Nox Player is another popular and powerful Android emulator for Windows. It has a simple interface, fast performance, high compatibility, and rich features. It also supports Google Play Store, keyboard and mouse controls, gamepad support, multi-instance mode, screen recording, streaming, and more. Nox Player is compatible with Dr. Driving 2 and can run it smoothly on most PCs. You can download Nox Player from its official website.
-
Gameloop
-
Gameloop is a specialized Android emulator for Windows that focuses on gaming. It has a modern interface, optimized performance, high compatibility, and rich features. It also supports Google Play Store, keyboard and mouse controls, gamepad support, multi-instance mode, screen recording, streaming, and more. Gameloop is compatible with Dr. Driving 2 and can run it smoothly on most PCs. You can download Gameloop from its official website.
-
Conclusion
-
Dr. Driving 2 is a fun and realistic driving simulation game that you can play on your PC with Windows 11 or Android emulators. In this article, we showed you two ways to download and play Dr. Driving 2 on your PC, depending on whether you have Windows 11 or not. We also introduced you to some of the best features and requirements of this game, as well as some frequently asked questions.
-
We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Happy driving!
-
FAQs
-
Is Dr. Driving 2 free to play?
-
Yes, Dr. Driving 2 is free to play on both mobile devices and PCs. However, it contains ads and in-app purchases that can enhance your gameplay experience.
-
Can I play Dr. Driving 2 offline?
-
No, Dr. Driving 2 requires an internet connection to play. You need to connect to the internet to access the game modes, levels, cars, and other features.
-
Can I play Dr. Driving 2 with my friends?
-
Yes, Dr. Driving 2 supports online multiplayer modes that allow you to play with your friends or other players from around the world. You can join or create a clan, chat with other players, challenge them to races or tournaments, and earn rewards.
-
How can I save my progress in Dr. Driving 2?
-
You can save your progress in Dr. Driving 2 by signing in with your Google account or Facebook account. This will sync your game data across devices and platforms.
-
How can I contact the developer of Dr. Driving 2?
-
You can contact the developer of Dr. Driving 2 by sending an email to support@dr-driving.com. You can also visit their official website or follow them on Facebook or Twitter for more information.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download PES 2020 Lite APK for Android Play PES 2021 with Just 50 MB of Storage.md b/spaces/1phancelerku/anime-remove-background/Download PES 2020 Lite APK for Android Play PES 2021 with Just 50 MB of Storage.md
deleted file mode 100644
index c3436dd294529888df75ee86c22b84962adc8979..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download PES 2020 Lite APK for Android Play PES 2021 with Just 50 MB of Storage.md
+++ /dev/null
@@ -1,126 +0,0 @@
-
-
PES 2020 Lite 50 MB APK: How to Download and Play the Free Version of the Popular Football Game
-
If you are a fan of football games, you might have heard of PES 2020, the latest installment of the eFootball Pro Evolution Soccer series by Konami. PES 2020 is a realistic and immersive football simulator that offers a variety of game modes, clubs, players, stadiums and features. However, if you don't want to spend money on buying the full-game, you can still enjoy a free-to-play version called PES 2020 Lite.
PES 2020 Lite is a reduced version of PES 2020 that allows you to play online matches with other users who have either the full-game or the lite version. You can also create your own team in myClub mode with a limited number of clubs available. In this article, we will explain what PES 2020 Lite is, how to download and install it on different platforms, and what are the pros and cons of playing it.
-
What is PES 2020 Lite?
-
PES 2020 Lite is a free-to-play version of PES 2020 that was released on December 9, 2019 for PS4, Xbox One and PC Steam. It is basically a demo version of the full-game that lets you try out some of its features and modes. You can play offline matches in local match, CO-OP and training modes, or online matches in myClub and eFootball modes. You can also edit some settings and customize your team in edit mode (only available for PS4 and Steam).
-
Features of PES 2020 Lite
-
PES 2020 Lite has some features that are similar to the full-game, such as:
-
-
The gameplay engine that delivers realistic and dynamic football action
-
The graphics and animations that create lifelike players and stadiums
-
The commentary and sound effects that enhance the atmosphere of the matches
-
The licenses and partnerships with official leagues, clubs and players
-
The updates and events that keep the game fresh and exciting
-
-
However, PES 2020 Lite also has some limitations that differentiate it from the full-game, such as:
-
-
The game modes that are only available in the full-game are Master League, Become a Legend, Matchday (offline), Online Divisions, Online CO-OP (online), and Random Selection Match
-
The clubs that are only available in kick-off mode are FC Barcelona, FC Bayern München, Manchester United, Juventus, Arsenal, Palmeiras, Flamengo, São Paulo, Corinthians, Vasco da Gama, Boca Juniors, Colo-Colo and River Plate
-
The stadiums that are only available are Allianz Arena (FC Bayern Munich), Allianz Parque (Palmeiras) and Allianz Stadium (Juventus)
-
-
How to download PES 2020 Lite for PS4, Xbox One and PC Steam
-
If you want to play PES 2020 Lite on your PS4, Xbox One or PC Steam, you can download it for free from the PlayStation Store, the Microsoft Store, or Steam, respectively.
After downloading the game, you can launch it from your console or PC and start playing. You will need an internet connection and a Konami ID to access some of the online features. You can also link your game data with your eFootball PES 2020 mobile app if you have one.
-
-
How to install PES 2020 Lite on Android devices
-
If you want to play PES 2020 Lite on your Android device, you will need to download and install an APK file that is not available on the Google Play Store. This is because PES 2020 Lite is not an official app by Konami, but a modified version of PES 2020 mobile that has been compressed to reduce the file size. Therefore, you will need to follow these steps to install it (a scripted adb variant is sketched after the list):
Download the APK file and its OBB data file from a trusted source
-
Enable the installation of apps from unknown sources in your device settings
-
Install the APK file on your device
-
Extract the OBB data file using a file manager app and copy the folder "jp.konami.pesam" to the path "Android/OBB" on your device storage
-
Launch the game and enjoy playing PES 2020 Lite on your Android device
-
-
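If you prefer to stage the files from a computer, the same steps can be scripted over adb. This is a rough sketch under stated assumptions: adb is installed, USB debugging is enabled on the device, and the APK/OBB names below are illustrative placeholders rather than the actual download.

```python
import subprocess

APK = "pes2020_lite.apk"     # illustrative name -- use the file you downloaded
OBB_DIR = "jp.konami.pesam"  # folder extracted from the OBB archive

# Sideload the APK onto the connected device.
subprocess.run(["adb", "install", APK], check=True)

# Copy the OBB data folder to the path the game expects.
subprocess.run(["adb", "push", OBB_DIR, "/sdcard/Android/obb/jp.konami.pesam"], check=True)
```
-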
Note that you will also need an internet connection and a Konami ID to play the game online. You may also encounter some errors or bugs while playing, as this is not an official app by Konami.
-
Pros and cons of PES 2020 Lite
-
PES 2020 Lite is a great option for those who want to experience PES 2020 without spending any money. However, it also has some drawbacks that you should be aware of before playing. Here are some of the pros and cons of PES 2020 Lite:
-
Pros
-
Free to play
-
The biggest advantage of PES 2020 Lite is that it is free to play. You don't need to pay anything to download and play the game, unlike the full-game that costs around $60. You can enjoy playing online matches with other users who have either the full-game or the lite version, and create your own team in myClub mode with a limited number of clubs available.
-
Online matches with full-game users
-
PES 2020 Lite allows you to play online matches with other users who have either the full-game or the lite version. This means that you can compete with a large and diverse player base, and test your skills against different opponents. You can also participate in online events and tournaments that are held regularly by Konami.
-
MyClub mode with limited clubs
-
PES 2020 Lite gives you access to myClub mode, which is one of the most popular modes in PES 2020. In myClub mode, you can create your own team by signing players, managers, coaches and scouts. You can also customize your team's kits, badges, formations and tactics. However, in PES 2020 Lite, you can only choose from a limited number of clubs, such as FC Barcelona, Manchester United, Juventus and Bayern Munich.
-
Cons
-
Limited game modes and clubs
-
The biggest disadvantage of PES 2020 Lite is that it has limited game modes and clubs compared to the full-game. You cannot play some of the most popular modes in PES 2020, such as master league, become a legend, matchday (offline), online divisions, online CO-OP (online) and random selection match. You also cannot choose from all the clubs that are available in kick-off mode, such as Arsenal, Liverpool, Real Madrid, PSG and more.
-
No data transfer to full-game
-
PES 2020 Lite does not allow you to transfer your data to the full-game if you decide to buy it later. This means that you will lose all your progress and achievements in PES 2020 Lite, such as your myClub team, your online match records, your edit data and your coins. You will have to start from scratch if you buy the full-game, which can be frustrating and discouraging.
-
Large file size
-
PES 2020 Lite may be a free-to-play version of PES 2020, but it still requires a lot of storage space on your device. The game size is around 40 GB for PS4, Xbox One and PC Steam, and around 1.5 GB for Android devices. This means that you will need to have enough free space on your device to download and install the game, and also to update it regularly. You may also experience some lag or slow loading times if your device is not powerful enough to run the game smoothly.
-
Frequently asked questions about PES 2020 Lite
-
Here are some of the most common questions that users have about PES 2020 Lite:
-
-
Is PES 2020 Lite worth playing?
-
PES 2020 Lite is worth playing if you are looking for a free-to-play football game that offers realistic and immersive gameplay, graphics and sound. You can play online matches with other users who have either the full-game or the lite version, and create your own team in myClub mode with a limited number of clubs available. However, you should also be aware of the limitations and drawbacks of PES 2020 Lite, such as the limited game modes and clubs, the lack of data transfer to the full-game, and the large file size.
-
Can I play PES 2020 Lite offline?
-
PES 2020 Lite can be played offline in local match, CO-OP and training modes. However, you will need an internet connection to play online matches in myClub and eFootball modes, and to access some of the online features and events. You will also need an internet connection to download and update the game.
-
How can I get more coins in PES 2020 Lite?
-
Coins are the premium currency in PES 2020 Lite that can be used to buy players, managers, scouts and other items in myClub mode. You can get more coins by completing achievements, participating in events, logging in daily, or buying them with real money.
-
How can I play with friends in PES 2020 Lite?
-
You can play with friends in PES 2020 Lite by inviting them to join your room in online CO-OP mode (only available for PS4, Xbox One and PC Steam), or by adding them as friends in myClub mode and challenging them to friendly matches.
-
How can I update PES 2020 Lite?
-
You can update PES 2020 Lite by downloading the latest version of the game from the links provided above, or by launching the game and following the instructions on the screen. You will need an internet connection to update the game.
-
-
Conclusion
-
PES 2020 Lite is a free-to-play version of PES 2020 that lets you enjoy some of its features and modes without spending any money. You can play online matches with other users who have either the full-game or the lite version, and create your own team in myClub mode with a limited number of clubs available. However, you should also be aware of the limitations and drawbacks of PES 2020 Lite, such as the limited game modes and clubs, the lack of data transfer to the full-game, and the large file size. If you want to experience PES 2020 fully, you will need to buy the full-game.
-
We hope that this article has helped you understand what PES 2020 Lite is, how to download and install it on different platforms, and what are the pros and cons of playing it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Explore Upland A City of History Culture and Adventure.md b/spaces/1phancelerku/anime-remove-background/Explore Upland A City of History Culture and Adventure.md
deleted file mode 100644
index 8934a8761c65b18c50e13bd60accc9251c3dcb77..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Explore Upland A City of History Culture and Adventure.md
+++ /dev/null
@@ -1,104 +0,0 @@
-
-
Upland: A Blockchain Game Where You Can Rebuild the World
-
Have you ever dreamed of owning a piece of land in your favorite city? Or maybe you want to create your own neighborhood, business, or arcade in a virtual world? If so, you might want to check out Upland, a blockchain game that lets you buy, sell, and trade virtual properties mapped to the real world.
-
Upland is a property-trading, city-building metaverse that uses blockchain technology to ensure true ownership of digital assets. By playing Upland, you can explore cities, collect properties, earn UPX coins and USD, and connect with other players. You can also participate in various activities and events that make Upland an immersive and fun experience.
In this article, we will give you an overview of what Upland is, how it works, and what are the benefits of playing it. We will also cover some of the features, future plans, and FAQs about Upland. So, if you are ready to rebuild the world with Upland, read on!
-
Upland Features
-
Upland is more than just a game. It is a metaverse that offers a range of features and possibilities for players. Here are some of the main features that make Upland unique and enjoyable.
-
Property Trading
-
One of the core features of Upland is property trading. In Upland, you can buy, sell, and trade virtual properties that are mapped to real-world addresses. Each property is represented by a non-fungible token (NFT) on the EOS blockchain, which means that you have full ownership and control over your digital assets.
-
To buy a property in Upland, you need to use UPX coins, which are the native currency of the game. You can earn UPX coins by exploring the map, completing collections, participating in events, or buying them with fiat or crypto. You can also sell your properties for UPX coins or USD on the open marketplace.
-
By trading properties in Upland, you can increase your net worth, level up your status, unlock new features, and earn passive income. You can also use your properties to create businesses, services, or attractions that add value to the metaverse.
-
City Building
-
Another feature of Upland is city building. In Upland, you can explore different cities around the world and collect properties that match your preferences. You can also discover landmarks, monuments, historical sites, and other points of interest that make each city unique.
-
-
Currently, Upland has launched several cities in the US, such as San Francisco, New York, Fresno, Bakersfield, Oakland, Cleveland, Chicago, Kansas City, Santa Clara, and Manhattan Beach. More cities are planned to be added in the future.
-
Besides exploring cities, you can also create and join neighborhoods and communities in Upland. You can collaborate with other players to develop and improve your areas, share ideas and tips, and have fun together. You can also join Upland's official Discord server, where you can chat with other players, get support, and stay updated on the latest news and announcements.
-
Metaverse Activities
-
The last feature of Upland that we will discuss is metaverse activities. In Upland, you can do more than just buying and selling properties. You can also engage in various activities and events that make the game more interactive and enjoyable.
-
Some of the activities that you can do in Upland are:
-
-
Collecting NFTs: You can collect different types of NFTs in Upland, such as cars, planes, trains, art, sports cards, and more. You can use your NFTs to customize your avatar, travel faster, or display them in your properties.
-
Playing mini-games: You can play mini-games in Upland, such as treasure hunts, scavenger hunts, quizzes, puzzles, and more. You can win prizes, UPX coins, NFTs, or badges by playing these games.
-
Competing in leaderboards: You can compete with other players in Upland's leaderboards, such as net worth, collections, earnings, visits, and more. You can earn rewards, recognition, and bragging rights by ranking high on these leaderboards.
-
-
Besides these activities, you can also participate in various events and challenges that are regularly organized by Upland's team or community. These events and challenges can range from themed collections, property auctions, trivia contests, scavenger hunts, and more. You can have fun, meet new people, and win amazing prizes by joining these events and challenges.
-
Upland Future Plans
-
Upland is not just a game that is already finished. It is a game that is constantly evolving and improving. Upland's team has a vision to create a metaverse that is rich in content, features, and possibilities for players.
-
Some of the future plans that Upland has are:
-
-
Expanding to more cities and countries: Upland plans to launch more cities and countries in the future, such as Los Angeles, London, Paris, Tokyo, Berlin, and more. This will allow players to explore more places and cultures in the metaverse.
-
Adding more functionality and utility to properties: Upland plans to add more functionality and utility to properties in the future, such as allowing players to build structures, businesses, services, or attractions on their properties. This will allow players to create more value and income from their properties.
-
Integrating with other platforms and projects: Upland plans to integrate with other platforms and projects in the future, such as Decentraland, Sandbox, CryptoKitties, NBA Top Shot, and more. This will allow players to access more content and features from other platforms and projects in the metaverse.
-
-
These are just some of the future plans that Upland has. There are more plans and ideas that are being developed and discussed by Upland's team and community. You can follow Upland's blog, Twitter, or Discord to stay updated on the latest developments and updates for Upland.
-
Conclusion
-
Upland is a blockchain game that lets you buy, sell, and trade virtual properties mapped to the real world. You can also explore cities, collect properties, earn UPX coins and USD, and connect with other players. You can also participate in various activities and events that make Upland an immersive and fun experience.
-
Upland is more than just a game. It is a metaverse that offers a range of features and possibilities for players. Upland is also a game that is constantly evolving and improving, with more cities, countries, functionality, utility, and integration planned for the future.
-
If you are interested in playing Upland, you can join the game by visiting their website and signing up with your email or phone number. You will also need an EOS account to play Upland, which you can create for free using the Wombat wallet app. Once you have your account, you can start playing Upland and rebuild the world with your own vision.
-
FAQs
-
Here are some of the frequently asked questions about Upland:
-
-
What is the difference between upland and lowland in geography?
-
In geography, upland and lowland are terms that describe the elevation of land. Upland refers to land that is higher than the surrounding area, such as hills, mountains, or plateaus. Lowland refers to land that is lower than the surrounding area, such as plains, valleys, or coasts.
-
What is the difference between upland and lowland in ecology?
-
In ecology, upland and lowland are terms that describe the habitat of plants and animals. Upland refers to habitats that are dry, cool, and have poor soil quality, such as grasslands, heathlands, or moorlands. Lowland refers to habitats that are wet, warm, and have rich soil quality, such as forests, wetlands, or marshes.
-
What is the difference between upland and lowland in blockchain gaming?
-
In blockchain gaming, upland and lowland are terms that describe the status of players in Upland. Upland refers to players who have verified their identity and have access to all the features and benefits of the game. Lowland refers to players who have not verified their identity and have limited access to some of the features and benefits of the game.
-
How can I join Upland and start playing?
-
You can join Upland by visiting their website and signing up with your email or phone number. You will also need an EOS account to play Upland, which you can create for free using the Wombat wallet app. Once you have your account, you can start playing Upland and rebuild the world with your own vision.
-
Where can I find more information and resources about Upland?
-
You can find more information and resources about Upland by visiting their blog, Twitter, or Discord. You can also check out their wiki, FAQs, or support page. You can also watch some of their videos or listen to some of their podcasts.
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py
deleted file mode 100644
index 22480b446c355c338389acbe97710f675b624263..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import List, Optional, Tuple, Union
-
-import paddle
-
-from ...models import UNet2DModel, VQModel
-from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from ...schedulers import DDIMScheduler
-
-
-class LDMPipeline(DiffusionPipeline):
- r"""
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Parameters:
- vqvae ([`VQModel`]):
- Vector-quantized (VQ) Model to encode and decode images to and from latent representations.
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- [`DDIMScheduler`] is to be used in combination with `unet` to denoise the encoded image latents.
- """
-
- def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
- super().__init__()
- self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
-
- @paddle.no_grad()
- def __call__(
- self,
- batch_size: int = 1,
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
- eta: float = 0.0,
- num_inference_steps: int = 50,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- **kwargs,
- ) -> Union[Tuple, ImagePipelineOutput]:
- r"""
- Args:
- batch_size (`int`, *optional*, defaults to 1):
- Number of images to generate.
- generator (`paddle.Generator`, *optional*):
- One or a list of paddle generator(s) to make generation deterministic.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
- `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
- generated images.
- """
-
- latents = paddle.randn(
- (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
- generator=generator,
- )
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
-
- self.scheduler.set_timesteps(num_inference_steps)
-
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-
- extra_kwargs = {}
- if accepts_eta:
- extra_kwargs["eta"] = eta
-
- for t in self.progress_bar(self.scheduler.timesteps):
- latent_model_input = self.scheduler.scale_model_input(latents, t)
- # predict the noise residual
- noise_prediction = self.unet(latent_model_input, t).sample
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
-
- # decode the image latents with the VAE
- image = self.vqvae.decode(latents).sample
-
- image = (image / 2 + 0.5).clip(0, 1)
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
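A minimal usage sketch for the pipeline above, assuming `ppdiffusers` re-exports `LDMPipeline` at the package root and that an unconditional LDM checkpoint in the usual vqvae/unet/scheduler layout is available (the checkpoint id below is illustrative):

```python
import paddle
from ppdiffusers import LDMPipeline  # assumed export path

# Illustrative checkpoint id; any unconditional LDM checkpoint with
# vqvae, unet, and scheduler components should work here.
pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")

paddle.seed(0)  # fix the seed so sampling is reproducible
images = pipe(batch_size=1, num_inference_steps=50).images
images[0].save("ldm_sample.png")
```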
diff --git a/spaces/1toTree/lora_test/ppdiffusers/version.py b/spaces/1toTree/lora_test/ppdiffusers/version.py
deleted file mode 100644
index 657b89ee325da9d8c2cb6aaefc77c63b66730f55..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/version.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# this file will be generated by tools
-# please do not modify it.
-VERSION = "0.0.0"
diff --git a/spaces/7hao/bingo/src/components/turn-counter.tsx b/spaces/7hao/bingo/src/components/turn-counter.tsx
deleted file mode 100644
index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000
--- a/spaces/7hao/bingo/src/components/turn-counter.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import React from 'react'
-import { Throttling } from '@/lib/bots/bing/types'
-
-export interface TurnCounterProps {
- throttling?: Throttling
-}
-
-export function TurnCounter({ throttling }: TurnCounterProps) {
- if (!throttling) {
- return null
- }
-
- return (
-
")
- return content
-
- @classmethod
- def general_filter(cls, content, agent_name):
- return content
-
- @classmethod
- def filter(cls, content: str, agent_name: str, ui_name: str):
- """
- Description:
- Make UI-specific modifications to the output content so that it renders more cleanly in Gradio.
- Input:
- content: output content
- agent_name: which agent produced the content
- ui_name: which UI is currently running
- Output:
- Modified content
- """
- mapping = {
- "SingleAgentUI": cls.singleagent_filter,
- "DebateUI": cls.debate_filter,
- "NovelUI": cls.novel_filter,
- "CodeUI": cls.code_filter,
- "GeneralUI": cls.general_filter
- }
- if ui_name in mapping:
- return mapping[ui_name](content, agent_name)
- else:
- return content
-
-class Client:
- """
- This is the client side of the inter-process communication.
- `gradio_backend.py` serves as the backend, while `run_gradio` is the frontend.
- Communication between the frontend and backend is accomplished using sockets.
- """
- # =======================Radio Const String======================
- SINGLE_MODE = "Single Mode"
- AUTO_MODE = "Auto Mode"
- MODE_LABEL = "Select the execution mode"
- MODE_INFO = "Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
- # ===============================================================
- mode = AUTO_MODE
- FIRST_RUN:bool = True
- # If the last agent is the user, the next agent runs automatically instead of waiting for a button click.
- LAST_USER:bool = False
-
- receive_server = None
- send_server = None
- current_node = None
- cache = {}
-
- def __init__(self, host=HOST, port=PORT, bufsize=1024):
- assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
- self.SIGN = SPECIAL_SIGN
- self.bufsize = bufsize
- assert bufsize > 0
- self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.client_socket.connect((host, port))
- while True:
- data = self.client_socket.recv(self.bufsize).decode('utf-8')
- if data == "hi":
- self.client_socket.send("hello agent".encode('utf-8'))
- time.sleep(1)
- elif data == "check":
- break
- print_log("Client: connecting successfully......")
-
- def start_server(self):
- while True:
- message = yield
- if message == 'exit':
- break
- self.send_message(message=message)
-
- def send_message(self, message):
- """Send the message to the server."""
- if isinstance(message, list) or isinstance(message, dict):
- message = str(message)
- assert isinstance(message, str)
- message = message + self.SIGN["SPLIT"]
- self.client_socket.send(message.encode('utf-8'))
-
- def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
- """Receive messages from the server, and it will block the process. Supports receiving long text."""
- remaining = ""
- while True:
- # receive message
- dataset = self.client_socket.recv(self.bufsize)
- try:
- # Decoding can fail when a multi-byte character is split across packets; buffer the bytes and keep receiving.
- dataset = dataset.decode('utf-8')
- except UnicodeDecodeError:
- if not isinstance(remaining, bytes):
- remaining = remaining.encode('utf-8')
- assert isinstance(dataset, bytes)
- remaining += dataset
- try:
- dataset = remaining.decode('utf-8')
- remaining = ""
- except UnicodeDecodeError:
- continue
- assert isinstance(remaining, str)
- dataset = remaining + dataset
- list_dataset = dataset.split(split_identifier)
- if len(list_dataset) == 1:
- # A single element after the split means no complete message has arrived yet.
- remaining = list_dataset[0]
- continue
- else:
- remaining = list_dataset[-1]
- # Receive successfully
- list_dataset = list_dataset[:-1]
- return_value = []
- for item in list_dataset:
- if end_identifier is not None and item == end_identifier:
- break
- return_value.append(item)
- identifier = yield return_value
- if identifier is not None:
- end_identifier, split_identifier = identifier
-
- def listening_for_start_(self):
- """
- When the server starts, the client is automatically launched.
- At this point the two processes must synchronize:
- the client sends its data to the server for rendering,
- then the server sends the modified data back to the client
- together with a startup command.
- Once the client receives the data, it starts running.
- """
- Client.receive_server = self.receive_message()
- # Waiting for information from the server.
- data: list = next(Client.receive_server)
- assert len(data) == 1
- data = eval(data[0])
- assert isinstance(data, dict)
- Client.cache.update(data)
- # Waiting for start command from the server.
- data:list = Client.receive_server.send(None)
- assert len(data) == 1
- assert data[0] == ""
-
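-# A minimal, self-contained sketch (not part of the original file) of the
-# delimiter-based framing used by `Client` and `WebUI`: messages are joined with
-# a SPLIT marker, and the receiver buffers data until a complete frame arrives.
-def _framing_demo():
-    SPLIT = "<SPLIT>"  # hypothetical marker; the real one comes from SPECIAL_SIGN
-    wire = "hello" + SPLIT + "world" + SPLIT + "partial"
-    *frames, remaining = wire.split(SPLIT)
-    assert frames == ["hello", "world"] and remaining == "partial"
-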
-class WebUI:
- """
- The base class for the frontend, which encapsulates some functions for process information synchronization.
- When a new frontend needs to be created, you should inherit from this class,
- then implement the `construct_ui()` method and set up event listeners.
- Finally, execute `run()` to load it.
- """
-
- def receive_message(
- self,
- end_identifier:str=None,
- split_identifier:str=SPECIAL_SIGN["SPLIT"]
- )->List:
- """This is the same as in Client class."""
- yield "hello"
- remaining = ""
- while True:
- dataset = self.client_socket.recv(self.bufsize)
- try:
- dataset = dataset.decode('utf-8')
- except UnicodeDecodeError:
- if not isinstance(remaining, bytes):
- remaining = remaining.encode('utf-8')
- assert isinstance(dataset, bytes)
- remaining += dataset
- try:
- dataset = remaining.decode('utf-8')
- remaining = ""
- except UnicodeDecodeError:
- continue
- assert isinstance(remaining, str)
- dataset = remaining + dataset
- list_dataset = dataset.split(split_identifier)
- if len(list_dataset) == 1:
- remaining = list_dataset[0]
- continue
- else:
- remaining = list_dataset[-1]
- list_dataset = list_dataset[:-1]
- return_value = []
- for item in list_dataset:
- if end_identifier is not None and item == end_identifier:
- break
- return_value.append(item)
- identifier = yield return_value
- if identifier is not None:
- end_identifier, split_identifier = identifier
-
- def send_message(self, message:str):
- """Send message to client."""
- SEP = self.SIGN["SPLIT"]
- self.client_socket.send(
- (message+SEP).encode("utf-8")
- )
-
- def _connect(self):
- # check
- if self.server_socket:
- self.server_socket.close()
- assert not os.path.isfile("PORT.txt")
- self.socket_port = check_port(PORT)
- # Step1. initialize
- self.server_socket = socket.socket(
- socket.AF_INET, socket.SOCK_STREAM
- )
- # Step2. binding ip and port
- self.server_socket.bind((self.socket_host, self.socket_port))
- # Step3. run client
- self._start_client()
-
- # Step4. listening for connect
- self.server_socket.listen(1)
-
- # Step5. test connection
- client_socket, client_address = self.server_socket.accept()
- print_log("server: establishing connection......")
- self.client_socket = client_socket
- while True:
- client_socket.send("hi".encode('utf-8'))
- time.sleep(1)
- data = client_socket.recv(self.bufsize).decode('utf-8')
- if data == "hello agent":
- client_socket.send("check".encode('utf-8'))
- print_log("server: connect successfully")
- break
- assert os.path.isfile("PORT.txt")
- os.remove("PORT.txt")
- if self.receive_server:
- del self.receive_server
- self.receive_server = self.receive_message()
- assert next(self.receive_server) == "hello"
-
- @abstractmethod
- def render_and_register_ui(self):
- # You need to implement this function.
- # Its purpose is to bind each agent's name to an image.
- # The agent names are stored in `self.cache`,
- # and the binding is done by the `add_agents` method of the `GradioConfig` class in `Gradio_Config/gradio_config.py`.
- # This function is executed in `self.first_recieve_from_client()`.
- pass
-
- def first_recieve_from_client(self, reset_mode:bool=False):
- """
- This function is used to receive information from the client and is typically executed during the initialization of the class.
- If `reset_mode` is False, it will also bind each agent's name to an image.
- """
- self.FIRST_RECIEVE_FROM_CLIENT = True
- data_list:List = self.receive_server.send(None)
- assert len(data_list) == 1
- data = eval(data_list[0])
- assert isinstance(data, dict)
- self.cache.update(data)
- if not reset_mode:
- self.render_and_register_ui()
-
- def _second_send(self, message:dict):
- # Send the modified message.
- # It will be executed in `self.send_start_cmd()` automatically.
- self.send_message(str(message))
-
- def _third_send(self):
- # Send start command.
- # It will be executed in `self.send_start_cmd()` automatically.
- self.send_message(self.SIGN['START'])
-
- def send_start_cmd(self, message:dict={"hello":"hello"}):
- # If you have no message to send, you can omit the `message` argument.
- assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
- self._second_send(message=message)
- time.sleep(1)
- self._third_send()
- self.FIRST_RECIEVE_FROM_CLIENT = False
-
- def __init__(
- self,
- client_cmd: list, # ['python','test.py','--a','b','--c','d']
- socket_host: str = HOST,
- socket_port: int = PORT,
- bufsize: int = 1024,
- ui_name: str = ""
- ):
- self.ui_name = ui_name
- self.server_socket = None
- self.SIGN = SPECIAL_SIGN
- self.socket_host = socket_host
- self.socket_port = socket_port
- self.bufsize = bufsize
- self.client_cmd = client_cmd
-
- self.receive_server = None
- self.cache = {}
- assert self.bufsize > 0
- self._connect()
-
- def _start_client(self):
- print(f"server: executing `{' '.join(self.client_cmd)}` ...")
- self.backend = subprocess.Popen(self.client_cmd)
-
- def _close_client(self):
- print(f"server: killing `{' '.join(self.client_cmd)}` ...")
- self.backend.terminate()
-
- def reset(self):
- print("server: restarting ...")
- self._close_client()
- time.sleep(1)
- self._connect()
-
- def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
- # Rendered bubbles (HTML format) are used for gradio output.
- output = f"**{node_name}** " if render_node_name else ""
- for item in agent_response:
- for agent_name in item:
- content = item[agent_name].replace("\n", " ")
- content = UIHelper.filter(content, agent_name, self.ui_name)
- output = f"{output} {UIHelper.wrap_css(content, agent_name)}"
- rendered_data[-1] = [rendered_data[-1][0], output]
- return rendered_data
-
- def run(self, share: bool = True):
- self.demo.queue()
- self.demo.launch(share=share)
-
-
-if __name__ == '__main__':
- pass
diff --git a/spaces/AIatUIUC/CodeLATS/executors/factory.py b/spaces/AIatUIUC/CodeLATS/executors/factory.py
deleted file mode 100644
index 995e8d83f7567bcceb2ec9e4ad92ccf771bd88ee..0000000000000000000000000000000000000000
--- a/spaces/AIatUIUC/CodeLATS/executors/factory.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .py_executor import PyExecutor
-from .executor_types import Executor
-
-def executor_factory(lang: str) -> Executor:
- if lang == "py" or lang == "python":
- return PyExecutor()
- else:
- raise ValueError(f"Invalid language for executor: {lang}")
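-
-# Usage sketch (hypothetical call site, not part of the original file):
-#
-#   executor = executor_factory("python")  # returns a PyExecutor
-#   # any other language string raises ValueError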
diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Yqcloud.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Yqcloud.py
deleted file mode 100644
index ad5c3a4326c68ceb7ee012fbf5bc072da72a7e40..0000000000000000000000000000000000000000
--- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Yqcloud.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import time
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-url = 'https://chat9.yqcloud.top/'
-model = [
- 'gpt-3.5-turbo',
-]
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
-
- headers = {
- 'authority': 'api.aichatos.cloud',
- 'origin': 'https://chat9.yqcloud.top',
- 'referer': 'https://chat9.yqcloud.top/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'prompt': str(messages),
- 'userId': f'#/chat/{chatId}',
- 'network': True,
- 'apikey': '',
- 'system': '',
- 'withoutContext': False,
- }
- response = requests.post('https://api.aichatos.cloud/api/generateStream',
- headers=headers, json=json_data, stream=True)
- for token in response.iter_content(chunk_size=2046):
- yield (token.decode('utf-8'))
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192.py
deleted file mode 100644
index 84595bb2e5f185814d1df44ee8c3681ae17deb5a..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192.py
+++ /dev/null
@@ -1,172 +0,0 @@
-_base_ = [
- '../../../_base_/default_runtime.py',
- '../../../_base_/datasets/deepfashion2.py'
-]
-
-default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
-
-resume = False # resume training from a checkpoint
-load_from = None # model weight loading
-train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10) # number of training epochs, validation interval
-param_scheduler = [
- dict( # warmup strategy
- type='LinearLR',
- begin=0,
- end=500,
- start_factor=0.001,
- by_epoch=False),
- dict( # scheduler
- type='MultiStepLR',
- begin=0,
- end=150,
- milestones=[100, 130],
- gamma=0.1,
- by_epoch=True)
-]
-optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # optimizer and learning rate
-auto_scale_lr = dict(base_batch_size=512) # automatically scale the learning rate according to batch_size
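-# (Worked example, not from the original config: with 4 GPUs x batch 64, the
-# linear scaling rule gives lr = 0.0005 * (4 * 64) / 512 = 0.00025.)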
-
-backend_args = dict(backend='local') # data-loading backend; loads from local disk by default
-dataset_type = 'DeepFashion2Dataset' # dataset class name
-data_mode = 'topdown' # algorithm type, used to specify how annotations are loaded
-data_root = 'data/deepfashion2/' # data storage path
-# Define the data codec, used to generate targets and decode predictions;
-# it also holds the input image size and the output heatmap size.
-codec = dict(
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
-
-train_pipeline = [
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=codec['input_size']),
- dict(type='GenerateTarget', encoder=codec),
- dict(type='PackPoseInputs')
-]
-val_pipeline = [ # test-time data transforms
- dict(type='LoadImage', backend_args=backend_args), # load the image
- dict(type='GetBBoxCenterScale'), # get center and scale from the bbox
- dict(type='TopdownAffine', input_size=codec['input_size']), # update the target data according to the transform matrix
- dict(type='PackPoseInputs') # pack the targets for the model
-]
-train_dataloader = dict( # training data loading
- batch_size=64, # batch size
- num_workers=6, # number of data-loading worker processes
- persistent_workers=True, # keep workers alive when idle to avoid the overhead of restarting them
- sampler=dict(type='DefaultSampler', shuffle=True), # sampling strategy: shuffle the data
- dataset=dict(
- type=dataset_type, # dataset class name
- data_root=data_root, # dataset path
- data_mode=data_mode, # algorithm type
- ann_file='train/deepfashion2_short_sleeved_dress.json', # annotation file path
- data_prefix=dict(img='train/image/'), # image path
- pipeline=train_pipeline # data pipeline
- ))
-val_dataloader = dict(
- batch_size=32,
- num_workers=6,
- persistent_workers=True, # keep workers alive when idle to avoid the overhead of restarting them
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False), # sampling strategy: no shuffling
- dataset=dict(
- type=dataset_type, # dataset class name
- data_root=data_root, # dataset path
- data_mode=data_mode, # algorithm type
- ann_file='validation/deepfashion2_short_sleeved_dress.json', # annotation file path
- data_prefix=dict(img='validation/image/'), # image path
- test_mode=True, # enable test mode
- pipeline=val_pipeline # data pipeline
- ))
-test_dataloader = val_dataloader # by default the validation and test sets are not distinguished; define your own if needed
-
-channel_cfg = dict(
- num_output_channels=294,
- dataset_joints=294,
- dataset_channel=[
- [
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
- 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
- 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
- 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
- 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
- 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
- 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
- 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
- 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
- 285, 286, 287, 288, 289, 290, 291, 292, 293
- ],
- ],
- inference_channel=[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ])
-
-model = dict(
- type='TopdownPoseEstimator', # the model structure determines the algorithm flow
- data_preprocessor=dict( # data normalization and channel-order adjustment, done as part of the model
- type='PoseDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True),
- backbone=dict(
- type='ResNet',
- depth=50,
- init_cfg=dict(
- type='Pretrained', # pretrained parameters; only backbone weights are loaded, for transfer learning
- checkpoint='torchvision://resnet50')),
- head=dict( # model head
- type='HeatmapHead',
- in_channels=2048,
- out_channels=channel_cfg['num_output_channels'],
- # deconv_out_channels=None,
- loss=dict(type='KeypointMSELoss', use_target_weight=True), # loss function
- decoder=codec), # decoder: converts heatmaps into coordinate values
- test_cfg=dict(
- flip_test=True, # enable horizontal-flip test-time ensembling
- flip_mode='heatmap', # flip the heatmaps
- shift_heatmap=True, # shift the flipped heatmaps to improve accuracy
- ))
-
-val_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE'),
-]
-test_evaluator = val_evaluator # by default the validation and test sets are not distinguished; define your own if needed
-
-visualizer = dict(
- vis_backends=[dict(type='LocalVisBackend'),
- dict(type='WandbVisBackend')])
diff --git a/spaces/Abhaykoul/Prompt_generator_for_helpingAI-tti/app.py b/spaces/Abhaykoul/Prompt_generator_for_helpingAI-tti/app.py
deleted file mode 100644
index b2e8fb701de8701f0e82fb440605904532aa98d8..0000000000000000000000000000000000000000
--- a/spaces/Abhaykoul/Prompt_generator_for_helpingAI-tti/app.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from transformers import pipeline, set_seed
-import gradio as grad, random, re
-
-
-gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
-with open("ideas.txt", "r") as f:
- line = f.readlines()
-
-
-def generate(starting_text):
- seed = random.randint(100, 1000000)
- set_seed(seed)
-
- if starting_text == "":
- starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
- starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
-
- response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
- response_list = []
- for x in response:
- resp = x['generated_text'].strip()
- if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
- response_list.append(resp+'\n')
-
- response_end = "\n".join(response_list)
- response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
- response_end = response_end.replace("<", "").replace(">", "")
-
- if response_end != "":
- return response_end
-
-
-txt = grad.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
-out = grad.Textbox(lines=4, label="Generated Prompts")
-
-examples = []
-for x in range(8):
- examples.append(line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize())
-
-title = "HelpingAI TTI Prompt Generator"
-description = 'This is a demo of the model series "MagicPrompt", in this case aimed at "HelpingAI-TTI". To use it, simply submit your text or click on one of the examples.'
-
-grad.Interface(fn=generate,
- inputs=txt,
- outputs=out,
- examples=examples,
- title=title,
- description=description,
- article='',
- allow_flagging='never',
- cache_examples=False,
- theme="default").launch(enable_queue=True, debug=True)
-
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/HuggingChat.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/HuggingChat.py
deleted file mode 100644
index b2cf9793137ab7832fb3c7623ff45a469d988632..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/HuggingChat.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-
-import json
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
-
-
-class HuggingChat(AsyncGeneratorProvider):
- url = "https://huggingface.co/chat"
- needs_auth = True
- working = True
- model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- stream: bool = True,
- proxy: str = None,
- cookies: dict = None,
- **kwargs
- ) -> AsyncGenerator:
- model = model if model else cls.model
- if proxy and "://" not in proxy:
- proxy = f"http://{proxy}"
- if not cookies:
- cookies = get_cookies(".huggingface.co")
-
- headers = {
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
- }
- async with ClientSession(
- cookies=cookies,
- headers=headers
- ) as session:
- async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
- conversation_id = (await response.json())["conversationId"]
-
- send = {
- "inputs": format_prompt(messages),
- "parameters": {
- "temperature": 0.2,
- "truncate": 1000,
- "max_new_tokens": 1024,
- "stop": [""],
- "top_p": 0.95,
- "repetition_penalty": 1.2,
- "top_k": 50,
- "return_full_text": False,
- **kwargs
- },
- "stream": stream,
- "options": {
- "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
- "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
- "is_retry": False,
- "use_cache": False,
- "web_search_id": ""
- }
- }
- async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
- if not stream:
- data = await response.json()
- if "error" in data:
- raise RuntimeError(data["error"])
- elif isinstance(data, list):
- yield data[0]["generated_text"].strip()
- else:
- raise RuntimeError(f"Response: {data}")
- else:
- start = "data:"
- first = True
- async for line in response.content:
- line = line.decode("utf-8")
- if line.startswith(start):
- line = json.loads(line[len(start):-1])
- if "token" not in line:
- raise RuntimeError(f"Response: {line}")
- if not line["token"]["special"]:
- if first:
- yield line["token"]["text"].lstrip()
- first = False
- else:
- yield line["token"]["text"]
-
- async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/spaces/AgProfile/chatbotopenaihere/README.md b/spaces/AgProfile/chatbotopenaihere/README.md
deleted file mode 100644
index 8689ef5b1443f95e9a13226b74c5ec6d98d7f1db..0000000000000000000000000000000000000000
--- a/spaces/AgProfile/chatbotopenaihere/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Chatbotopenaihere
-emoji: 👀
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetChildrenSizers.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetChildrenSizers.js
deleted file mode 100644
index 5ff1f5b54a0320a451f8c761bff2bf88cf2a7b13..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetChildrenSizers.js
+++ /dev/null
@@ -1,17 +0,0 @@
-var GetChildrenSizers = function (out) {
- if (out === undefined) {
- out = [];
- }
- var children = this.sizerChildren, child;
- for (var i = 0, cnt = children.length; i < cnt; i++) {
- child = children[i];
- if (child === '\n') {
- continue;
- }
- if (child.isRexSizer) {
- out.push(child);
- }
- }
- return out;
-}
-export default GetChildrenSizers;
\ No newline at end of file
diff --git a/spaces/AkiKagura/Marco-Generation/README.md b/spaces/AkiKagura/Marco-Generation/README.md
deleted file mode 100644
index dc5edcfead032726e43161524fd569c5b24975c8..0000000000000000000000000000000000000000
--- a/spaces/AkiKagura/Marco-Generation/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Marco Generation
-emoji: 💻
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-sdk_version: 3.8
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Alesmikes/elvire01/README.md b/spaces/Alesmikes/elvire01/README.md
deleted file mode 100644
index 4aafa4571453d5b003ecb2781d6547baec0a5219..0000000000000000000000000000000000000000
--- a/spaces/Alesmikes/elvire01/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: QnA
-emoji: 📈
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-duplicated_from: GenAIDemo/Luludemo
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Altinas/vits-uma-genshin-honkais/text/cleaners.py b/spaces/Altinas/vits-uma-genshin-honkais/text/cleaners.py
deleted file mode 100644
index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000
--- a/spaces/Altinas/vits-uma-genshin-honkais/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def lowercase(text):
- return text.lower()
-
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-
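-# For reference, the "transliteration_cleaners" pipeline named in the header
-# docstring composes the three helpers above. This sketch follows the upstream
-# keithito/tacotron repo credited at the top of this file (the definition is
-# not visible in this truncated copy):
-def transliteration_cleaners(text):
-    '''Pipeline for non-English text that transliterates to ASCII.'''
-    text = convert_to_ascii(text)
-    text = lowercase(text)
-    text = collapse_whitespace(text)
-    return text
-
-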
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text!='':
- text+=' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil','pau']:
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
- else:
- continue
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
- a2_next=-1
- else:
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
- if i
-#include <string>
-#include <utility>
-
-#include "libipc/shm.h"
-
-#include "libipc/utility/pimpl.h"
-#include "libipc/memory/resource.h"
-
-namespace ipc {
-namespace shm {
-
-class handle::handle_ : public pimpl<handle_> {
-public:
- shm::id_t id_ = nullptr;
- void* m_ = nullptr;
-
- ipc::string n_;
- std::size_t s_ = 0;
-};
-
-handle::handle()
- : p_(p_->make()) {
-}
-
-handle::handle(char const * name, std::size_t size, unsigned mode)
- : handle() {
- acquire(name, size, mode);
-}
-
-handle::handle(handle&& rhs)
- : handle() {
- swap(rhs);
-}
-
-handle::~handle() {
- release();
- p_->clear();
-}
-
-void handle::swap(handle& rhs) {
- std::swap(p_, rhs.p_);
-}
-
-handle& handle::operator=(handle rhs) {
- swap(rhs);
- return *this;
-}
-
-bool handle::valid() const noexcept {
- return impl(p_)->m_ != nullptr;
-}
-
-std::size_t handle::size() const noexcept {
- return impl(p_)->s_;
-}
-
-char const * handle::name() const noexcept {
- return impl(p_)->n_.c_str();
-}
-
-std::int32_t handle::ref() const noexcept {
- return shm::get_ref(impl(p_)->id_);
-}
-
-void handle::sub_ref() noexcept {
- shm::sub_ref(impl(p_)->id_);
-}
-
-bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
- release();
- impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
- impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
- return valid();
-}
-
-std::int32_t handle::release() {
- if (impl(p_)->id_ == nullptr) return -1;
- return shm::release(detach());
-}
-
-void* handle::get() const {
- return impl(p_)->m_;
-}
-
-void handle::attach(id_t id) {
- if (id == nullptr) return;
- release();
- impl(p_)->id_ = id;
- impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
-}
-
-id_t handle::detach() {
- auto old = impl(p_)->id_;
- impl(p_)->id_ = nullptr;
- impl(p_)->m_ = nullptr;
- impl(p_)->s_ = 0;
- impl(p_)->n_.clear();
- return old;
-}
-
-} // namespace shm
-} // namespace ipc
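-
-// A minimal usage sketch (illustrative only; not part of the original file):
-//
-//   ipc::shm::handle h{"demo-shm", 4096};  // acquire or create a 4 KiB segment
-//   if (h.valid()) {
-//       std::memcpy(h.get(), "hello", 6);  // write through the mapped pointer
-//   }                                      // released when `h` goes out of scope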
diff --git a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/prod_cons.h
deleted file mode 100644
index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000
--- a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/prod_cons.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#pragma once
-
-#include <atomic>
-#include <utility>
-#include <cstring>
-#include <type_traits>
-#include <cstdint>
-
-#include "libipc/def.h"
-
-#include "libipc/platform/detail.h"
-#include "libipc/circ/elem_def.h"
-#include "libipc/utility/log.h"
-#include "libipc/utility/utility.h"
-
-namespace ipc {
-
-////////////////////////////////////////////////////////////////
-/// producer-consumer implementation
-////////////////////////////////////////////////////////////////
-
-template <typename Flag>
-struct prod_cons_impl;
-
-template <>
-struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
-
- template <std::size_t DataSize, std::size_t AlignSize>
- struct elem_t {
- std::aligned_storage_t<DataSize, AlignSize> data_ {};
- };
-
- alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
- alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
-
- constexpr circ::u2_t cursor() const noexcept {
- return 0;
- }
-
- template <typename W, typename F, typename E>
- bool push(W* /*wrapper*/, F&& f, E* elems) {
- auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
- if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
- return false; // full
- }
- std::forward(f)(&(elems[cur_wt].data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- /**
- * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
- * So we could just disconnect all connections of receiver, and return false.
- */
- template <typename W, typename F, typename E>
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
- return false;
- }
-
- template <typename W, typename F, typename R, typename E>
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
- auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
- if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
- return false; // empty
- }
- std::forward(f)(&(elems[cur_rd].data_));
- std::forward(out)(true);
- rd_.fetch_add(1, std::memory_order_release);
- return true;
- }
-};
-
-template <>
-struct prod_cons_impl>
- : prod_cons_impl> {
-
- template <typename W, typename F, typename E>
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(1);
- return false;
- }
-
- template <typename W, typename F, typename R, template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
- byte_t buff[DS];
- for (unsigned k = 0;;) {
- auto cur_rd = rd_.load(std::memory_order_relaxed);
- if (circ::index_of(cur_rd) ==
- circ::index_of(wt_.load(std::memory_order_acquire))) {
- return false; // empty
- }
- std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
- if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
- std::forward<F>(f)(buff);
- std::forward<R>(out)(true);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-template <>
-struct prod_cons_impl>
- : prod_cons_impl> {
-
- using flag_t = std::uint64_t;
-
- template <std::size_t DataSize, std::size_t AlignSize>
- struct elem_t {
- std::aligned_storage_t<DataSize, AlignSize> data_ {};
- std::atomic<flag_t> f_ct_ { 0 }; // commit flag
- };
-
- alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
-
- template <typename W, typename F, typename E>
- bool push(W* /*wrapper*/, F&& f, E* elems) {
- circ::u2_t cur_ct, nxt_ct;
- for (unsigned k = 0;;) {
- cur_ct = ct_.load(std::memory_order_relaxed);
- if (circ::index_of(nxt_ct = cur_ct + 1) ==
- circ::index_of(rd_.load(std::memory_order_acquire))) {
- return false; // full
- }
- if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
- break;
- }
- ipc::yield(k);
- }
- auto* el = elems + circ::index_of(cur_ct);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- while (1) {
- auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
- if (cur_ct != wt_.load(std::memory_order_relaxed)) {
- return true;
- }
- if ((~cac_ct) != cur_ct) {
- return true;
- }
- if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
- return true;
- }
- wt_.store(nxt_ct, std::memory_order_release);
- cur_ct = nxt_ct;
- nxt_ct = cur_ct + 1;
- el = elems + circ::index_of(cur_ct);
- }
- return true;
- }
-
- template <typename W, typename F, typename E>
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(1);
- return false;
- }
-
- template <typename W, typename F, typename R, template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
- byte_t buff[DS];
- for (unsigned k = 0;;) {
- auto cur_rd = rd_.load(std::memory_order_relaxed);
- auto cur_wt = wt_.load(std::memory_order_acquire);
- auto id_rd = circ::index_of(cur_rd);
- auto id_wt = circ::index_of(cur_wt);
- if (id_rd == id_wt) {
- auto* el = elems + id_wt;
- auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
- if ((~cac_ct) != cur_wt) {
- return false; // empty
- }
- if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
- wt_.store(cur_wt + 1, std::memory_order_release);
- }
- k = 0;
- }
- else {
- std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
- if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
- std::forward<F>(f)(buff);
- std::forward<R>(out)(true);
- return true;
- }
- ipc::yield(k);
- }
- }
- }
-};
-
-template <>
-struct prod_cons_impl> {
-
- using rc_t = std::uint64_t;
-
- enum : rc_t {
- ep_mask = 0x00000000ffffffffull,
- ep_incr = 0x0000000100000000ull
- };
-
- template <std::size_t DataSize, std::size_t AlignSize>
- struct elem_t {
- std::aligned_storage_t<DataSize, AlignSize> data_ {};
- std::atomic<rc_t> rc_ { 0 }; // read-counter
- };
-
- alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
- alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
-
- circ::u2_t cursor() const noexcept {
- return wt_.load(std::memory_order_acquire);
- }
-
- template <typename W, typename F, typename E>
- bool push(W* wrapper, F&& f, E* elems) {
- E* el;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & ep_mask;
- if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
- return false; // has not finished yet
- }
- // consider rem_cc to be 0 here
- if (el->rc_.compare_exchange_weak(
- cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
- break;
- }
- ipc::yield(k);
- }
- std::forward(f)(&(el->data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- template <typename W, typename F, typename E>
- bool force_push(W* wrapper, F&& f, E* elems) {
- E* el;
- epoch_ += ep_incr;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & ep_mask;
- if (cc & rem_cc) {
- ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
- cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
- if (cc == 0) return false; // no reader
- }
- // just compare & exchange
- if (el->rc_.compare_exchange_weak(
- cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
- break;
- }
- ipc::yield(k);
- }
- std::forward(f)(&(el->data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- template <typename W, typename F, typename R, typename E>
- bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
- if (cur == cursor()) return false; // acquire
- auto* el = elems + circ::index_of(cur++);
- std::forward(f)(&(el->data_));
- for (unsigned k = 0;;) {
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- if ((cur_rc & ep_mask) == 0) {
- std::forward<R>(out)(true);
- return true;
- }
- auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id());
- if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
- std::forward<R>(out)((nxt_rc & ep_mask) == 0);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-template <>
-struct prod_cons_impl> {
-
- using rc_t = std::uint64_t;
- using flag_t = std::uint64_t;
-
- enum : rc_t {
- rc_mask = 0x00000000ffffffffull,
- ep_mask = 0x00ffffffffffffffull,
- ep_incr = 0x0100000000000000ull,
- ic_mask = 0xff000000ffffffffull,
- ic_incr = 0x0000000100000000ull
- };
-
- template <std::size_t DataSize, std::size_t AlignSize>
- struct elem_t {
- std::aligned_storage_t<DataSize, AlignSize> data_ {};
- std::atomic<rc_t> rc_ { 0 }; // read-counter
- std::atomic<flag_t> f_ct_ { 0 }; // commit flag
- };
-
- alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
- alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
-
- circ::u2_t cursor() const noexcept {
- return ct_.load(std::memory_order_acquire);
- }
-
- constexpr static rc_t inc_rc(rc_t rc) noexcept {
- return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
- }
-
- constexpr static rc_t inc_mask(rc_t rc) noexcept {
- return inc_rc(rc) & ~rc_mask;
- }
-
- template <typename W, typename F, typename E>
- bool push(W* wrapper, F&& f, E* elems) {
- E* el;
- circ::u2_t cur_ct;
- rc_t epoch = epoch_.load(std::memory_order_acquire);
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_relaxed);
- circ::cc_t rem_cc = cur_rc & rc_mask;
- if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
- return false; // has not finished yet
- }
- else if (!rem_cc) {
- auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
- if ((cur_fl != cur_ct) && cur_fl) {
- return false; // full
- }
- }
- // consider rem_cc to be 0 here
- if (el->rc_.compare_exchange_weak(
- cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
- epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
- break;
- }
- ipc::yield(k);
- }
- // only one thread/process would touch here at one time
- ct_.store(cur_ct + 1, std::memory_order_release);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- return true;
- }
-
- template <typename W, typename F, typename E>
- bool force_push(W* wrapper, F&& f, E* elems) {
- E* el;
- circ::u2_t cur_ct;
- rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & rc_mask;
- if (cc & rem_cc) {
- ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
- cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
- if (cc == 0) return false; // no reader
- }
- // just compare & exchange
- if (el->rc_.compare_exchange_weak(
- cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
- if (epoch == epoch_.load(std::memory_order_acquire)) {
- break;
- }
- else if (push(wrapper, std::forward<F>(f), elems)) {
- return true;
- }
- epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
- }
- ipc::yield(k);
- }
- // only one thread/process would touch here at one time
- ct_.store(cur_ct + 1, std::memory_order_release);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- return true;
- }
-
- template <typename W, typename F, typename R, typename E, std::size_t N>
- bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
- auto* el = elems + circ::index_of(cur);
- auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
- if (cur_fl != ~static_cast<flag_t>(cur)) {
- return false; // empty
- }
- ++cur;
- std::forward(f)(&(el->data_));
- for (unsigned k = 0;;) {
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- if ((cur_rc & rc_mask) == 0) {
- std::forward<R>(out)(true);
- el->f_ct_.store(cur + N - 1, std::memory_order_release);
- return true;
- }
- auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id());
- bool last_one = false;
- if ((last_one = (nxt_rc & rc_mask) == 0)) {
- el->f_ct_.store(cur + N - 1, std::memory_order_release);
- }
- if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
- std::forward<R>(out)(last_one);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-} // namespace ipc
diff --git a/spaces/AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT/README.md b/spaces/AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT/README.md
deleted file mode 100644
index dfb0bab63a1bb6d539fb41bfebdbe09542006bf6..0000000000000000000000000000000000000000
--- a/spaces/AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Emilyalsentzer-Bio ClinicalBERT
-emoji: 🐢
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md
deleted file mode 100644
index 2150f2f769fd75744a11e8b21c5928f1f194c24f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md
+++ /dev/null
@@ -1,251 +0,0 @@
-# 🧨 Stable Diffusion in JAX / Flax!
-
-[[open-in-colab]]
-
-🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version `0.5.1`! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform.
-
-This notebook shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works or want to run it on a GPU, please refer to [this notebook](https://huggingface.co/docs/diffusers/stable_diffusion).
-
-First, make sure you are using a TPU backend. If you are running this notebook in Colab, select `Runtime` in the menu above, then select the option "Change runtime type" and then select `TPU` under the `Hardware accelerator` setting.
-
-Note that JAX is not exclusive to TPUs, but it shines on that hardware because each TPU server has 8 TPU accelerators working in parallel.
-
-## Setup
-
-First make sure diffusers is installed.
-
-```py
-# uncomment to install the necessary libraries in Colab
-#!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
-#!pip install diffusers
-```
-
-```python
-import jax.tools.colab_tpu
-
-jax.tools.colab_tpu.setup_tpu()
-import jax
-```
-
-```python
-num_devices = jax.device_count()
-device_type = jax.devices()[0].device_kind
-
-print(f"Found {num_devices} JAX devices of type {device_type}.")
-assert (
- "TPU" in device_type
-), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
-```
-
-```python out
-Found 8 JAX devices of type Cloud TPU.
-```
-
-Then we import all the dependencies.
-
-```python
-import numpy as np
-import jax
-import jax.numpy as jnp
-
-from pathlib import Path
-from jax import pmap
-from flax.jax_utils import replicate
-from flax.training.common_utils import shard
-from PIL import Image
-
-from huggingface_hub import notebook_login
-from diffusers import FlaxStableDiffusionPipeline
-```
-
-## Model Loading
-
-TPU devices support `bfloat16`, an efficient half-float type. We'll use it for our tests, but you can also use `float32` to use full precision instead.
-
-```python
-dtype = jnp.bfloat16
-```
-
-Flax is a functional framework, so models are stateless and parameters are stored outside them. Loading the pre-trained Flax pipeline will return both the pipeline itself and the model weights (or parameters). We are using a `bf16` version of the weights, which leads to type warnings that you can safely ignore.
-
-```python
-pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4",
- revision="bf16",
- dtype=dtype,
-)
-```
-
-## Inference
-
-Since TPUs usually have 8 devices working in parallel, we'll replicate the prompt as many times as there are devices. Then we'll perform inference on the 8 devices at once, each responsible for generating one image. Thus, we'll get 8 images in the same amount of time it takes one chip to generate a single one.
-
-After replicating the prompt, we obtain the tokenized text ids by invoking the `prepare_inputs` function of the pipeline. The length of the tokenized text is set to 77 tokens, as required by the configuration of the underlying CLIP Text model.
-
-```python
-prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
-prompt = [prompt] * jax.device_count()
-prompt_ids = pipeline.prepare_inputs(prompt)
-prompt_ids.shape
-```
-
-```python out
-(8, 77)
-```
-
-### Replication and parallelization
-
-Model parameters and inputs have to be replicated across the 8 parallel devices we have. The parameters dictionary is replicated using `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
-
-```python
-p_params = replicate(params)
-```
-
-```python
-prompt_ids = shard(prompt_ids)
-prompt_ids.shape
-```
-
-```python out
-(8, 1, 77)
-```
-
-That shape means that each of the `8` devices will receive as input a `jnp` array with shape `(1, 77)`. `1` is therefore the batch size per device. On TPUs with sufficient memory, it could be larger than `1` if we wanted to generate multiple images (per chip) at once; a sketch of that follows below.
-
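-For instance, to generate two images per device (16 in total), we could replicate each prompt twice before sharding. A sketch, assuming the chips have enough memory (`prompt_text` stands for the prompt string):
-
-```python
-prompt = [prompt_text] * jax.device_count() * 2  # two prompts per device
-prompt_ids = shard(pipeline.prepare_inputs(prompt))  # shape: (8, 2, 77)
-```
-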
-We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is the standard procedure in Flax, which is very serious and opinionated about random numbers – all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when we are training across multiple distributed devices.
-
-The helper function below uses a seed to initialize a random number generator. As long as we use the same seed, we'll get the exact same results. Feel free to use different seeds when exploring results later in the notebook.
-
-```python
-def create_key(seed=0):
- return jax.random.PRNGKey(seed)
-```
-
-We obtain a rng and then "split" it 8 times so each device receives a different generator. Therefore, each device will create a different image, and the full process is reproducible.
-
-```python
-rng = create_key(0)
-rng = jax.random.split(rng, jax.device_count())
-```
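-
-The split keys are stacked along a leading device axis, one key per device. A quick check (a sketch; PRNG keys are pairs of `uint32` values):
-
-```python
-print(rng.shape)  # (8, 2)
-```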
-
-JAX code can be compiled to an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX will have to recompile the code, and we wouldn't be able to take advantage of the optimized speed.
-
-The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It will also ensure that the model runs in parallel on the 8 available devices.
-
-The first time we run the following cell it will take a long time to compile, but subsequent calls (even with different inputs) will be much faster. For example, it took more than a minute to compile on a TPU v2-8 when I tested, but then it takes about **`7s`** for future inference runs.
-
-```
-%%time
-images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
-```
-
-```python out
-CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
-Wall time: 1min 29s
-```
-
-The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension and obtain 8 images of `512 × 512 × 3` and then convert them to PIL.
-
-```python
-images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
-images = pipeline.numpy_to_pil(images)
-```
-
-### Visualization
-
-Let's create a helper function to display images in a grid.
-
-```python
-def image_grid(imgs, rows, cols):
- w, h = imgs[0].size
- grid = Image.new("RGB", size=(cols * w, rows * h))
- for i, img in enumerate(imgs):
- grid.paste(img, box=(i % cols * w, i // cols * h))
- return grid
-```
-
-```python
-image_grid(images, 2, 4)
-```
-
-
-
-
-## Using different prompts
-
-We don't have to replicate the _same_ prompt in all the devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once. Let's do that!
-
-First, we'll write out eight different prompts, one per device:
-
-```python
-prompts = [
- "Labrador in the style of Hokusai",
- "Painting of a squirrel skating in New York",
- "HAL-9000 in the style of Van Gogh",
- "Times Square under water, with fish and a dolphin swimming around",
- "Ancient Roman fresco showing a man working on his laptop",
- "Close-up photograph of young black woman against urban background, high quality, bokeh",
- "Armchair in the shape of an avocado",
- "Clown astronaut in space, with Earth in the background",
-]
-```
-
-```python
-prompt_ids = pipeline.prepare_inputs(prompts)
-prompt_ids = shard(prompt_ids)
-
-images = pipeline(prompt_ids, p_params, rng, jit=True).images
-images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
-images = pipeline.numpy_to_pil(images)
-
-image_grid(images, 2, 4)
-```
-
-
-
-
-## How does parallelization work?
-
-We said before that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. We'll now briefly look inside that process to show how it works.
-
-JAX parallelization can be done in multiple ways. The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization. It means we'll run several copies of the same code, each on different data inputs. More sophisticated approaches are possible; if you are interested, we invite you to go over the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit) to explore this topic!
-
-`jax.pmap` does two things for us:
-- Compiles (or `jit`s) the code, as if we had invoked `jax.jit()`. Compilation does not happen when we call `pmap`, but only the first time the pmapped function is invoked.
-- Ensures the compiled code runs in parallel on all the available devices.
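-
-As a toy illustration of SPMD (not part of the pipeline code), `pmap` maps a function over the leading axis of its input, running one copy per device:
-
-```python
-import jax
-import jax.numpy as jnp
-
-xs = jnp.arange(jax.device_count() * 3.0).reshape(jax.device_count(), 3)
-ys = jax.pmap(lambda x: x * 2.0)(xs)  # each device doubles its own (3,) shard
-print(ys.shape)  # (num_devices, 3)
-```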
-
-To show how it works, we `pmap` the `_generate` method of the pipeline, which is the private method that generates images. Please note that this method may be renamed or removed in future releases of `diffusers`.
-
-```python
-p_generate = pmap(pipeline._generate)
-```
-
-After we use `pmap`, the prepared function `p_generate` will conceptually do the following:
-* Invoke a copy of the underlying function `pipeline._generate` in each device.
-* Send each device a different portion of the input arguments. That's what sharding is used for. In our case, `prompt_ids` has shape `(8, 1, 77)`. This array will be split into `8` shards, and each copy of `_generate` will receive an input with shape `(1, 77)`.
-
-We can code `_generate` completely ignoring the fact that it will be invoked in parallel. We just care about our batch size (`1` in this example) and the dimensions that make sense for our code, and don't have to change anything to make it work in parallel.
-
-Just as with the pipeline call above, the first time we run the following cell it will take a while, but subsequent calls will be much faster.
-
-```
-%%time
-images = p_generate(prompt_ids, p_params, rng)
-images = images.block_until_ready()
-images.shape
-```
-
-```python out
-CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
-Wall time: 1min 15s
-```
-
-```python
-images.shape
-```
-
-```python out
-(8, 1, 512, 512, 3)
-```
-
-We use `block_until_ready()` to correctly measure inference time, because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking will occur automatically when you want to use the result of a computation that has not yet been materialized.
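-
-For instance, a minimal timing sketch (illustrative only, reusing the names defined above) would look like this:
-
-```python
-import time
-
-start = time.perf_counter()
-out = p_generate(prompt_ids, p_params, rng)  # returns almost immediately (asynchronous dispatch)
-out = out.block_until_ready()                # wait until the images are actually computed
-print(f"elapsed: {time.perf_counter() - start:.1f}s")
-```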
\ No newline at end of file
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py
deleted file mode 100644
index 9d493ef527bb161be98d0e4ea433104b3bb9ff48..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3_r50-d8.py',
- '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- decode_head=dict(num_classes=60),
- auxiliary_head=dict(num_classes=60),
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
-optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py
deleted file mode 100644
index ebb1a8eaee16de7443ab3e79e02a37340de511d7..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x512_20k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Artrajz/vits-simple-api/vits-simple-api-installer-latest.sh b/spaces/Artrajz/vits-simple-api/vits-simple-api-installer-latest.sh
deleted file mode 100644
index ea303200aa35797ad4be2e5f357ebe7e72fce860..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/vits-simple-api-installer-latest.sh
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/bin/bash
-INSTALL_DIR=/usr/local/vits-simple-api
-
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[0;33m'
-PLAIN='\033[0m'
-
-declare -A EN_MESSAGES
-declare -A ZH_MESSAGES
-
-EN_MESSAGES=(
- ["ATTEMPT_DOWNLOAD"]="Attempting to download"
- ["FROM"]="from"
- ["DOWNLOAD_FAIL"]="Failed to download"
- ["FROM_ALL_URLS"]="from all provided URLs."
- ["DOWNLOADING"]="Downloading..."
- ["VERIFYING"]="Verifying..."
- ["UNZIPPING"]="Unzipping..."
- ["CHOOSE_VERSION"]="Which version of docker-compose.yaml do you want to download?"
- ["DOCKER_CPU"]="docker-compose.yaml (CPU version)"
- ["DOCKER_GPU"]="docker-compose-gpu.yaml (GPU version)"
- ["ENTER_CHOICE"]="Enter your choice (1 or 2): "
- ["INVALID_CHOICE"]="Invalid choice. Please enter 1 or 2."
- ["DOWNLOAD_CONFIG"]="Downloading configuration file shortly..."
- ["PULL_IMAGE"]="Do you want to start pulling the image? Enter 1 for yes or 2 for no"
- ["DOWNLOAD_DICT"]="Do you want to download the pyopenjtalk dictionary file? Enter 1 for yes or 2 for no"
- ["MUST_DOWNLOAD_JP"]="Japanese model must be downloaded."
- ["DOWNLOAD_VITS_CHINESE"]="Do you want to download the bert model for vits_chinese? Enter 1 for yes, 2 for no."
- ["MUST_DOWNLOAD_VITS_CHINESE"]="Using vits_chinese requires downloading these models, which will take up about 410MB."
- ["DOWNLOAD_BERT_VITS2"]="Do you want to download chinese-roberta-wwm-ext-large? Enter 1 for yes or 2 for no"
- ["MUST_DOWNLOAD_BERT_VITS2"]="To use Bert-VITS2, you must download these models, which will take up about 1.63GB."
- ["DOWNLOADED"]="File is downloaded correctly."
- ["CORRUPTED"]="The file may not have been downloaded, or the download might be incomplete, and it could also be corrupted."
- ["INSTALL_COMPLETE"]="The upgrade or installation has been completed."
- ["CONFIG_DIR"]="The configuration file directory is"
- ["IMPORT_NOTICE"]="If the vits model is not imported, it cannot be used. Import the model in the configuration file directory."
- ["RESTART_NOTICE"]="After modifying the configuration file, restart the docker container for the modification to take effect."
- ["ISSUE_NOTICE"]="If you have any questions, please put them in the issues."
- ["GITHUB_LINK"]="https://github.com/Artrajz/vits-simple-api"
-)
-
-ZH_MESSAGES=(
- ["ATTEMPT_DOWNLOAD"]="正在尝试下载"
- ["FROM"]="从"
- ["DOWNLOAD_FAIL"]="都下载失败"
- ["FROM_ALL_URLS"]="从所有提供的URLs"
- ["DOWNLOADING"]="正在下载..."
- ["VERIFYING"]="正在校验"
- ["UNZIPPING"]="正在解压..."
- ["CHOOSE_VERSION"]="你想下载哪个版本的docker-compose.yaml?"
- ["DOCKER_CPU"]="docker-compose.yaml (CPU版本)"
- ["DOCKER_GPU"]="docker-compose-gpu.yaml (GPU版本)"
- ["ENTER_CHOICE"]="请输入您的选择 (1 或 2): "
- ["INVALID_CHOICE"]="无效选择。 请重新输入 1 或 2。"
- ["DOWNLOAD_CONFIG"]="即将下载配置文件..."
- ["PULL_IMAGE"]="是否要开始拉取镜像?输入1表示是,2表示否。"
- ["DOWNLOAD_DICT"]="是否要下载pyopenjtalk的词典文件?输入1表示是,2表示否。"
- ["MUST_DOWNLOAD_JP"]="使用日语模型必须下载该词典文件,将占用大约102MB。"
- ["DOWNLOAD_VITS_CHINESE"]="是否要下载vits_chinese的bert模型?输入1表示是,2表示否。"
- ["MUST_DOWNLOAD_VITS_CHINESE"]="使用vits_chinese必须下载这些模型,将占用大约410MB。"
- ["DOWNLOAD_BERT_VITS2"]="是否要下载chinese-roberta-wwm-ext-large?输入1表示是,2表示否。"
- ["MUST_DOWNLOAD_BERT_VITS2"]="使用Bert-VITS2必须下载这些模型,将占用大约1.63GB。"
- ["DOWNLOADED"]="文件已正确下载。"
- ["CORRUPTED"]="文件可能未下载,或下载不完整,也有可能已损坏。"
- ["INSTALL_COMPLETE"]="更新或安装已完成。"
- ["CONFIG_DIR"]="配置文件目录是"
- ["IMPORT_NOTICE"]="如果vits模型没有被导入,它是无法使用的。请在配置文件目录中导入模型。"
- ["RESTART_NOTICE"]="修改配置文件后,请重启docker容器以使修改生效。"
- ["ISSUE_NOTICE"]="如果你有任何问题,请在issues中提出,或者加入q群提问。"
- ["GITHUB_LINK"]="https://github.com/Artrajz/vits-simple-api"
-)
-
-echo -e "${PLAIN}${GREEN}Choose a language/选择语言: ${PLAIN}"
-echo "1. English"
-echo "2. 中文"
-read -p "Enter your choice (1 or 2): " choice_language
-
-declare -A MESSAGES
-if [ "$choice_language" -eq 1 ]; then
- for key in "${!EN_MESSAGES[@]}"; do
- MESSAGES["$key"]="${EN_MESSAGES[$key]}"
- done
-else
- for key in "${!ZH_MESSAGES[@]}"; do
- MESSAGES["$key"]="${ZH_MESSAGES[$key]}"
- done
-fi
-
-mkdir -p $INSTALL_DIR
-cd $INSTALL_DIR
-
-download_with_fallback() {
- local filename=$1
- shift # Shift arguments to the left to handle URLs
-
- local success=0
- local url
- for url in "$@"; do
- echo -e "${YELLOW}${MESSAGES["ATTEMPT_DOWNLOAD"]} $filename ${MESSAGES["FROM"]} $url\n${PLAIN}"
- if wget -O "$INSTALL_DIR/$filename" "$url"; then
- success=1
- break
- fi
- done
-
- if [ "$success" -ne 1 ]; then
- echo -e "${RED} $filename ${MESSAGES["FROM_ALL_URLS"]} ${MESSAGES["DOWNLOAD_FAIL"]}${PLAIN}"
- exit 1
- fi
-}
-
-version_gt() {
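- # True when $1 is strictly greater than $2 in version order: with sort -V the smaller version sorts first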
- test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"
-}
-
-while true; do
- echo -e "${GREEN}${MESSAGES["CHOOSE_VERSION"]}${PLAIN}"
- echo -e "1. ${MESSAGES["DOCKER_CPU"]}"
- echo -e "2. ${MESSAGES["DOCKER_GPU"]}"
- read -p "${MESSAGES["ENTER_CHOICE"]}" choice_gpu
- case $choice_gpu in
- 1)
- echo -e "${MESSAGES["DOWNLOADING"]}"
- download_with_fallback docker-compose.yaml \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose.yaml" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose.yaml"
- break
- ;;
- 2)
- echo -e "${MESSAGES["DOWNLOADING"]}"
- download_with_fallback docker-compose.yaml \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose-gpu.yaml" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose-gpu.yaml"
- break
- ;;
- *)
- echo -e "${RED}${MESSAGES["INVALID_CHOICE"]}${PLAIN}"
- ;;
- esac
-done
-
-if [ "$choice_gpu" -eq 2 ]; then
- DOCKER_VERSION=$(docker version --format '{{.Server.Version}}')
- MIN_DOCKER_VERSION="19.03"
-
- if version_gt $MIN_DOCKER_VERSION $DOCKER_VERSION; then
- echo -e "${RED}Your Docker version ($DOCKER_VERSION) does not support GPU. You need at least version $MIN_DOCKER_VERSION.${PLAIN}"
- exit 1
- fi
-fi
-
-if ! docker compose version &>/dev/null; then
- echo -e "${RED}docker compose could not be found.${PLAIN}"
- exit 1
-fi
-
-echo -e "${GREEN}${MESSAGES["PULL_IMAGE"]}${PLAIN}"
-read -p "${MESSAGES["ENTER_CHOICE"]}" choice_pull
-
-if [ "$choice_pull" -eq 1 ]; then
- docker compose pull
- docker compose up -d
-fi
-
-echo -e "${YELLOW}${MESSAGES["DOWNLOAD_CONFIG"]}${PLAIN}"
-
-if [ ! -f config.py ]; then
- download_with_fallback config.py \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py"
-fi
-
-if [ ! -f gunicorn_config.py ]; then
- download_with_fallback gunicorn_config.py \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py"
-fi
-
-download_with_fallback config.example.py \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py"
-
-download_with_fallback gunicorn_config.example.py \
- "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" \
- "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py"
-
-echo -e "${GREEN}${MESSAGES["DOWNLOAD_DICT"]}${PLAIN}"
-echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_JP"]}${PLAIN}"
-read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_pyopenjtalk
-
-if [ "$choice_download_pyopenjtalk" -eq 1 ]; then
- mkdir -p pyopenjtalk
- echo -e "${MESSAGES["DOWNLOADING"]}"
- download_with_fallback open_jtalk_dic_utf_8-1.11.tar.gz \
- "https://github.com/r9y9/open_jtalk/releases/download/v1.11.1/open_jtalk_dic_utf_8-1.11.tar.gz" \
- "https://ghproxy.com/https://github.com/r9y9/open_jtalk/releases/download/v1.11.1/open_jtalk_dic_utf_8-1.11.tar.gz"
- echo -e "${MESSAGES["UNZIPPING"]}"
- tar -xzvf open_jtalk_dic_utf_8-1.11.tar.gz -C pyopenjtalk/
- rm open_jtalk_dic_utf_8-1.11.tar.gz
-fi
-
-echo -e "${GREEN}${MESSAGES["DOWNLOAD_VITS_CHINESE"]}${PLAIN}"
-echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_VITS_CHINESE"]}${PLAIN}"
-read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_vits_chinese
-
-if [ "$choice_download_vits_chinese" -eq 1 ]; then
- mkdir -p vits/bert
-
- EXPECTED_MD5="dea78034433141adc8002404aa1b3184"
- FILE_PATH="vits/bert/prosody_model.pt"
- echo -e "${MESSAGES["VERIFYING"]}$FILE_PATH"
- ACTUAL_MD5=$(md5sum $FILE_PATH | awk '{print $1}')
-
- if [ "$EXPECTED_MD5" == "$ACTUAL_MD5" ]; then
- echo "${MESSAGES["DOWNLOADED"]}"
- else
- echo "${MESSAGES["CORRUPTED"]}"
- download_with_fallback vits/bert/prosody_model.pt \
- "https://huggingface.co/spaces/maxmax20160403/vits_chinese/resolve/main/bert/prosody_model.pt"
- fi
-
-fi
-
-echo -e "${GREEN}${MESSAGES["DOWNLOAD_BERT_VITS2"]}${PLAIN}"
-echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_BERT_VITS2"]}${PLAIN}"
-read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_bert_vits2
-
-if [ "$choice_download_bert_vits2" -eq 1 ]; then
- mkdir -p bert_vits2/bert/chinese-roberta-wwm-ext-large
-
- EXPECTED_MD5="15d7435868fef1bd4222ff7820149a2a"
- FILE_PATH="bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin"
- echo -e "${MESSAGES["VERIFYING"]}$FILE_PATH"
- ACTUAL_MD5=$(md5sum $FILE_PATH | awk '{print $1}')
-
- if [ "$EXPECTED_MD5" == "$ACTUAL_MD5" ]; then
- echo "${MESSAGES["DOWNLOADED"]}"
- else
- echo "${MESSAGES["CORRUPTED"]}"
- download_with_fallback bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin \
- "https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/resolve/main/pytorch_model.bin"
- fi
-
- mkdir -p bert_vits2/bert/bert-base-japanese-v3
-
- EXPECTED_MD5="6d0f8f3503dae04df0711b6175ef0c8e"
- FILE_PATH="bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin"
- echo -e "${MESSAGES["VERIFYING"]}$FILE_PATH"
- ACTUAL_MD5=$(md5sum $FILE_PATH | awk '{print $1}')
-
- if [ "$EXPECTED_MD5" == "$ACTUAL_MD5" ]; then
- echo "${MESSAGES["DOWNLOADED"]}"
- else
- echo "${MESSAGES["CORRUPTED"]}"
- download_with_fallback bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin \
- "https://huggingface.co/cl-tohoku/bert-base-japanese-v3/resolve/main/pytorch_model.bin"
- fi
-
-fi
-
-if [ "$choice_gpu" -eq 2 ]; then
- if ! docker run --gpus all artrajz/vits-simple-api:latest-gpu nvidia-smi &>/dev/null; then
- echo -e "${RED}Your Docker does not seem to support GPU or NVIDIA Docker is not installed properly.${PLAIN}"
- exit 1
- fi
-fi
-
-echo -e "\n${MESSAGES["INSTALL_COMPLETE"]}"
-echo -e "${MESSAGES["CONFIG_DIR"]} $(realpath $INSTALL_DIR)"
-echo -e "${YELLOW}${MESSAGES["IMPORT_NOTICE"]}${PLAIN}"
-echo -e "${YELLOW}${MESSAGES["RESTART_NOTICE"]}${PLAIN}"
-echo -e "${MESSAGES["ISSUE_NOTICE"]}"
-echo -e "${MESSAGES["GITHUB_LINK"]}"
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
deleted file mode 100644
index 264d564dbda676b52f446c0d25433a15939a78a3..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
+++ /dev/null
@@ -1,519 +0,0 @@
-"""
-This module uses ctypes to bind a whole bunch of functions and constants from
-SecureTransport. The goal here is to provide the low-level API to
-SecureTransport. These are essentially the C-level functions and constants, and
-they're pretty gross to work with.
-
-This code is a bastardised version of the code found in Will Bond's oscrypto
-library. An enormous debt is owed to him for blazing this trail for us. For
-that reason, this code should be considered to be covered both by urllib3's
-license and by oscrypto's:
-
- Copyright (c) 2015-2016 Will Bond
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
- to deal in the Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute, sublicense,
- and/or sell copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- DEALINGS IN THE SOFTWARE.
-"""
-from __future__ import absolute_import
-
-import platform
-from ctypes import (
- CDLL,
- CFUNCTYPE,
- POINTER,
- c_bool,
- c_byte,
- c_char_p,
- c_int32,
- c_long,
- c_size_t,
- c_uint32,
- c_ulong,
- c_void_p,
-)
-from ctypes.util import find_library
-
-from ...packages.six import raise_from
-
-if platform.system() != "Darwin":
- raise ImportError("Only macOS is supported")
-
-version = platform.mac_ver()[0]
-version_info = tuple(map(int, version.split(".")))
-if version_info < (10, 8):
- raise OSError(
- "Only OS X 10.8 and newer are supported, not %s.%s"
- % (version_info[0], version_info[1])
- )
-
-
-def load_cdll(name, macos10_16_path):
- """Loads a CDLL by name, falling back to known path on 10.16+"""
- try:
- # Big Sur is technically 11 but we use 10.16 due to the Big Sur
- # beta being labeled as 10.16.
- if version_info >= (10, 16):
- path = macos10_16_path
- else:
- path = find_library(name)
- if not path:
- raise OSError # Caught and reraised as 'ImportError'
- return CDLL(path, use_errno=True)
- except OSError:
- raise_from(ImportError("The library %s failed to load" % name), None)
-
-
-Security = load_cdll(
- "Security", "/System/Library/Frameworks/Security.framework/Security"
-)
-CoreFoundation = load_cdll(
- "CoreFoundation",
- "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
-)
-
-
-Boolean = c_bool
-CFIndex = c_long
-CFStringEncoding = c_uint32
-CFData = c_void_p
-CFString = c_void_p
-CFArray = c_void_p
-CFMutableArray = c_void_p
-CFDictionary = c_void_p
-CFError = c_void_p
-CFType = c_void_p
-CFTypeID = c_ulong
-
-CFTypeRef = POINTER(CFType)
-CFAllocatorRef = c_void_p
-
-OSStatus = c_int32
-
-CFDataRef = POINTER(CFData)
-CFStringRef = POINTER(CFString)
-CFArrayRef = POINTER(CFArray)
-CFMutableArrayRef = POINTER(CFMutableArray)
-CFDictionaryRef = POINTER(CFDictionary)
-CFArrayCallBacks = c_void_p
-CFDictionaryKeyCallBacks = c_void_p
-CFDictionaryValueCallBacks = c_void_p
-
-SecCertificateRef = POINTER(c_void_p)
-SecExternalFormat = c_uint32
-SecExternalItemType = c_uint32
-SecIdentityRef = POINTER(c_void_p)
-SecItemImportExportFlags = c_uint32
-SecItemImportExportKeyParameters = c_void_p
-SecKeychainRef = POINTER(c_void_p)
-SSLProtocol = c_uint32
-SSLCipherSuite = c_uint32
-SSLContextRef = POINTER(c_void_p)
-SecTrustRef = POINTER(c_void_p)
-SSLConnectionRef = c_uint32
-SecTrustResultType = c_uint32
-SecTrustOptionFlags = c_uint32
-SSLProtocolSide = c_uint32
-SSLConnectionType = c_uint32
-SSLSessionOption = c_uint32
-
-
-try:
- Security.SecItemImport.argtypes = [
- CFDataRef,
- CFStringRef,
- POINTER(SecExternalFormat),
- POINTER(SecExternalItemType),
- SecItemImportExportFlags,
- POINTER(SecItemImportExportKeyParameters),
- SecKeychainRef,
- POINTER(CFArrayRef),
- ]
- Security.SecItemImport.restype = OSStatus
-
- Security.SecCertificateGetTypeID.argtypes = []
- Security.SecCertificateGetTypeID.restype = CFTypeID
-
- Security.SecIdentityGetTypeID.argtypes = []
- Security.SecIdentityGetTypeID.restype = CFTypeID
-
- Security.SecKeyGetTypeID.argtypes = []
- Security.SecKeyGetTypeID.restype = CFTypeID
-
- Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
- Security.SecCertificateCreateWithData.restype = SecCertificateRef
-
- Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
- Security.SecCertificateCopyData.restype = CFDataRef
-
- Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SecIdentityCreateWithCertificate.argtypes = [
- CFTypeRef,
- SecCertificateRef,
- POINTER(SecIdentityRef),
- ]
- Security.SecIdentityCreateWithCertificate.restype = OSStatus
-
- Security.SecKeychainCreate.argtypes = [
- c_char_p,
- c_uint32,
- c_void_p,
- Boolean,
- c_void_p,
- POINTER(SecKeychainRef),
- ]
- Security.SecKeychainCreate.restype = OSStatus
-
- Security.SecKeychainDelete.argtypes = [SecKeychainRef]
- Security.SecKeychainDelete.restype = OSStatus
-
- Security.SecPKCS12Import.argtypes = [
- CFDataRef,
- CFDictionaryRef,
- POINTER(CFArrayRef),
- ]
- Security.SecPKCS12Import.restype = OSStatus
-
- SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
- SSLWriteFunc = CFUNCTYPE(
- OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
- )
-
- Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
- Security.SSLSetIOFuncs.restype = OSStatus
-
- Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
- Security.SSLSetPeerID.restype = OSStatus
-
- Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
- Security.SSLSetCertificate.restype = OSStatus
-
- Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
- Security.SSLSetCertificateAuthorities.restype = OSStatus
-
- Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
- Security.SSLSetConnection.restype = OSStatus
-
- Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
- Security.SSLSetPeerDomainName.restype = OSStatus
-
- Security.SSLHandshake.argtypes = [SSLContextRef]
- Security.SSLHandshake.restype = OSStatus
-
- Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
- Security.SSLRead.restype = OSStatus
-
- Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
- Security.SSLWrite.restype = OSStatus
-
- Security.SSLClose.argtypes = [SSLContextRef]
- Security.SSLClose.restype = OSStatus
-
- Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
- Security.SSLGetNumberSupportedCiphers.restype = OSStatus
-
- Security.SSLGetSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t),
- ]
- Security.SSLGetSupportedCiphers.restype = OSStatus
-
- Security.SSLSetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- c_size_t,
- ]
- Security.SSLSetEnabledCiphers.restype = OSStatus
-
- Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
- Security.SSLGetNumberEnabledCiphers.restype = OSStatus
-
- Security.SSLGetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t),
- ]
- Security.SSLGetEnabledCiphers.restype = OSStatus
-
- Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
- Security.SSLGetNegotiatedCipher.restype = OSStatus
-
- Security.SSLGetNegotiatedProtocolVersion.argtypes = [
- SSLContextRef,
- POINTER(SSLProtocol),
- ]
- Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
-
- Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
- Security.SSLCopyPeerTrust.restype = OSStatus
-
- Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
- Security.SecTrustSetAnchorCertificates.restype = OSStatus
-
- Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
- Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
-
- Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
- Security.SecTrustEvaluate.restype = OSStatus
-
- Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
- Security.SecTrustGetCertificateCount.restype = CFIndex
-
- Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
- Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
-
- Security.SSLCreateContext.argtypes = [
- CFAllocatorRef,
- SSLProtocolSide,
- SSLConnectionType,
- ]
- Security.SSLCreateContext.restype = SSLContextRef
-
- Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
- Security.SSLSetSessionOption.restype = OSStatus
-
- Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
- Security.SSLSetProtocolVersionMin.restype = OSStatus
-
- Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
- Security.SSLSetProtocolVersionMax.restype = OSStatus
-
- try:
- Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
- Security.SSLSetALPNProtocols.restype = OSStatus
- except AttributeError:
- # Supported only in 10.12+
- pass
-
- Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SSLReadFunc = SSLReadFunc
- Security.SSLWriteFunc = SSLWriteFunc
- Security.SSLContextRef = SSLContextRef
- Security.SSLProtocol = SSLProtocol
- Security.SSLCipherSuite = SSLCipherSuite
- Security.SecIdentityRef = SecIdentityRef
- Security.SecKeychainRef = SecKeychainRef
- Security.SecTrustRef = SecTrustRef
- Security.SecTrustResultType = SecTrustResultType
- Security.SecExternalFormat = SecExternalFormat
- Security.OSStatus = OSStatus
-
- Security.kSecImportExportPassphrase = CFStringRef.in_dll(
- Security, "kSecImportExportPassphrase"
- )
- Security.kSecImportItemIdentity = CFStringRef.in_dll(
- Security, "kSecImportItemIdentity"
- )
-
- # CoreFoundation time!
- CoreFoundation.CFRetain.argtypes = [CFTypeRef]
- CoreFoundation.CFRetain.restype = CFTypeRef
-
- CoreFoundation.CFRelease.argtypes = [CFTypeRef]
- CoreFoundation.CFRelease.restype = None
-
- CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
- CoreFoundation.CFGetTypeID.restype = CFTypeID
-
- CoreFoundation.CFStringCreateWithCString.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFStringEncoding,
- ]
- CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
-
- CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
- CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
-
- CoreFoundation.CFStringGetCString.argtypes = [
- CFStringRef,
- c_char_p,
- CFIndex,
- CFStringEncoding,
- ]
- CoreFoundation.CFStringGetCString.restype = c_bool
-
- CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
- CoreFoundation.CFDataCreate.restype = CFDataRef
-
- CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
- CoreFoundation.CFDataGetLength.restype = CFIndex
-
- CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
- CoreFoundation.CFDataGetBytePtr.restype = c_void_p
-
- CoreFoundation.CFDictionaryCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- POINTER(CFTypeRef),
- CFIndex,
- CFDictionaryKeyCallBacks,
- CFDictionaryValueCallBacks,
- ]
- CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
-
- CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
- CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
-
- CoreFoundation.CFArrayCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- CFIndex,
- CFArrayCallBacks,
- ]
- CoreFoundation.CFArrayCreate.restype = CFArrayRef
-
- CoreFoundation.CFArrayCreateMutable.argtypes = [
- CFAllocatorRef,
- CFIndex,
- CFArrayCallBacks,
- ]
- CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
-
- CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
- CoreFoundation.CFArrayAppendValue.restype = None
-
- CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
- CoreFoundation.CFArrayGetCount.restype = CFIndex
-
- CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
- CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
-
- CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
- CoreFoundation, "kCFAllocatorDefault"
- )
- CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
- CoreFoundation, "kCFTypeArrayCallBacks"
- )
- CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
- CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
- )
- CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
- CoreFoundation, "kCFTypeDictionaryValueCallBacks"
- )
-
- CoreFoundation.CFTypeRef = CFTypeRef
- CoreFoundation.CFArrayRef = CFArrayRef
- CoreFoundation.CFStringRef = CFStringRef
- CoreFoundation.CFDictionaryRef = CFDictionaryRef
-
-except AttributeError:
- raise ImportError("Error initializing ctypes")
-
-
-class CFConst(object):
- """
- A class object that acts as essentially a namespace for CoreFoundation
- constants.
- """
-
- kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
-
-
-class SecurityConst(object):
- """
- A class object that acts as essentially a namespace for Security constants.
- """
-
- kSSLSessionOptionBreakOnServerAuth = 0
-
- kSSLProtocol2 = 1
- kSSLProtocol3 = 2
- kTLSProtocol1 = 4
- kTLSProtocol11 = 7
- kTLSProtocol12 = 8
- # SecureTransport does not support TLS 1.3 even if there's a constant for it
- kTLSProtocol13 = 10
- kTLSProtocolMaxSupported = 999
-
- kSSLClientSide = 1
- kSSLStreamType = 0
-
- kSecFormatPEMSequence = 10
-
- kSecTrustResultInvalid = 0
- kSecTrustResultProceed = 1
- # This gap is present on purpose: this was kSecTrustResultConfirm, which
- # is deprecated.
- kSecTrustResultDeny = 3
- kSecTrustResultUnspecified = 4
- kSecTrustResultRecoverableTrustFailure = 5
- kSecTrustResultFatalTrustFailure = 6
- kSecTrustResultOtherError = 7
-
- errSSLProtocol = -9800
- errSSLWouldBlock = -9803
- errSSLClosedGraceful = -9805
- errSSLClosedNoNotify = -9816
- errSSLClosedAbort = -9806
-
- errSSLXCertChainInvalid = -9807
- errSSLCrypto = -9809
- errSSLInternal = -9810
- errSSLCertExpired = -9814
- errSSLCertNotYetValid = -9815
- errSSLUnknownRootCert = -9812
- errSSLNoRootCert = -9813
- errSSLHostNameMismatch = -9843
- errSSLPeerHandshakeFail = -9824
- errSSLPeerUserCancelled = -9839
- errSSLWeakPeerEphemeralDHKey = -9850
- errSSLServerAuthCompleted = -9841
- errSSLRecordOverflow = -9847
-
- errSecVerifyFailed = -67808
- errSecNoTrustSettings = -25263
- errSecItemNotFound = -25300
- errSecInvalidTrustSettings = -25262
-
- # Cipher suites. We only pick the ones our default cipher string allows.
- # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
- TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
- TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
- TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
- TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
- TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
- TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
- TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
- TLS_AES_128_GCM_SHA256 = 0x1301
- TLS_AES_256_GCM_SHA384 = 0x1302
- TLS_AES_128_CCM_8_SHA256 = 0x1305
- TLS_AES_128_CCM_SHA256 = 0x1304
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/testing.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/testing.py
deleted file mode 100644
index 84a0ef17078c99e5917db41e3dbaf035fe206d7c..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/testing.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# testing.py
-
-from contextlib import contextmanager
-import typing
-
-from .core import (
- ParserElement,
- ParseException,
- Keyword,
- __diag__,
- __compat__,
-)
-
-
-class pyparsing_test:
- """
- namespace class for classes useful in writing unit tests
- """
-
- class reset_pyparsing_context:
- """
- Context manager to be used when writing unit tests that modify pyparsing config values:
- - packrat parsing
- - bounded recursion parsing
- - default whitespace characters.
- - default keyword characters
- - literal string auto-conversion class
- - __diag__ settings
-
- Example::
-
- with reset_pyparsing_context():
- # test that literals used to construct a grammar are automatically suppressed
- ParserElement.inlineLiteralsUsing(Suppress)
-
- term = Word(alphas) | Word(nums)
- group = Group('(' + term[...] + ')')
-
- # assert that the '()' characters are not included in the parsed tokens
- self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
- # after exiting context manager, literals are converted to Literal expressions again
- """
-
- def __init__(self):
- self._save_context = {}
-
- def save(self):
- self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
- self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-
- self._save_context[
- "literal_string_class"
- ] = ParserElement._literalStringClass
-
- self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
-
- self._save_context["packrat_enabled"] = ParserElement._packratEnabled
- if ParserElement._packratEnabled:
- self._save_context[
- "packrat_cache_size"
- ] = ParserElement.packrat_cache.size
- else:
- self._save_context["packrat_cache_size"] = None
- self._save_context["packrat_parse"] = ParserElement._parse
- self._save_context[
- "recursion_enabled"
- ] = ParserElement._left_recursion_enabled
-
- self._save_context["__diag__"] = {
- name: getattr(__diag__, name) for name in __diag__._all_names
- }
-
- self._save_context["__compat__"] = {
- "collect_all_And_tokens": __compat__.collect_all_And_tokens
- }
-
- return self
-
- def restore(self):
- # reset pyparsing global state
- if (
- ParserElement.DEFAULT_WHITE_CHARS
- != self._save_context["default_whitespace"]
- ):
- ParserElement.set_default_whitespace_chars(
- self._save_context["default_whitespace"]
- )
-
- ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
-
- Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
- ParserElement.inlineLiteralsUsing(
- self._save_context["literal_string_class"]
- )
-
- for name, value in self._save_context["__diag__"].items():
- (__diag__.enable if value else __diag__.disable)(name)
-
- ParserElement._packratEnabled = False
- if self._save_context["packrat_enabled"]:
- ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
- else:
- ParserElement._parse = self._save_context["packrat_parse"]
- ParserElement._left_recursion_enabled = self._save_context[
- "recursion_enabled"
- ]
-
- __compat__.collect_all_And_tokens = self._save_context["__compat__"]["collect_all_And_tokens"]
-
- return self
-
- def copy(self):
- ret = type(self)()
- ret._save_context.update(self._save_context)
- return ret
-
- def __enter__(self):
- return self.save()
-
- def __exit__(self, *args):
- self.restore()
-
- class TestParseResultsAsserts:
- """
- A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
- """
-
- def assertParseResultsEquals(
- self, result, expected_list=None, expected_dict=None, msg=None
- ):
- """
- Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
- and compare any defined results names with an optional ``expected_dict``.
- """
- if expected_list is not None:
- self.assertEqual(expected_list, result.as_list(), msg=msg)
- if expected_dict is not None:
- self.assertEqual(expected_dict, result.as_dict(), msg=msg)
-
- def assertParseAndCheckList(
- self, expr, test_string, expected_list, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
- """
- result = expr.parse_string(test_string, parse_all=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
-
- def assertParseAndCheckDict(
- self, expr, test_string, expected_dict, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
- """
- result = expr.parse_string(test_string, parse_all=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
-
- def assertRunTestResults(
- self, run_tests_report, expected_parse_results=None, msg=None
- ):
- """
- Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
- list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
- with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
- Finally, asserts that the overall ``runTests()`` success value is ``True``.
-
- :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
- :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
- """
- run_test_success, run_test_results = run_tests_report
-
- if expected_parse_results is not None:
- merged = [
- (*rpt, expected)
- for rpt, expected in zip(run_test_results, expected_parse_results)
- ]
- for test_string, result, expected in merged:
- # expected should be a tuple containing a list and/or a dict or an exception,
- # and optional failure message string
- # an empty tuple will skip any result validation
- fail_msg = next(
- (exp for exp in expected if isinstance(exp, str)), None
- )
- expected_exception = next(
- (
- exp
- for exp in expected
- if isinstance(exp, type) and issubclass(exp, Exception)
- ),
- None,
- )
- if expected_exception is not None:
- with self.assertRaises(
- expected_exception=expected_exception, msg=fail_msg or msg
- ):
- if isinstance(result, Exception):
- raise result
- else:
- expected_list = next(
- (exp for exp in expected if isinstance(exp, list)), None
- )
- expected_dict = next(
- (exp for exp in expected if isinstance(exp, dict)), None
- )
- if (expected_list, expected_dict) != (None, None):
- self.assertParseResultsEquals(
- result,
- expected_list=expected_list,
- expected_dict=expected_dict,
- msg=fail_msg or msg,
- )
- else:
- # warning here maybe?
- print("no validation for {!r}".format(test_string))
-
- # do this last, in case some specific test results can be reported instead
- self.assertTrue(
- run_test_success, msg=msg if msg is not None else "failed runTests"
- )
-
- @contextmanager
- def assertRaisesParseException(self, exc_type=ParseException, msg=None):
- with self.assertRaises(exc_type, msg=msg):
- yield
-
- @staticmethod
- def with_line_numbers(
- s: str,
- start_line: typing.Optional[int] = None,
- end_line: typing.Optional[int] = None,
- expand_tabs: bool = True,
- eol_mark: str = "|",
- mark_spaces: typing.Optional[str] = None,
- mark_control: typing.Optional[str] = None,
- ) -> str:
- """
- Helpful method for debugging a parser - prints a string with line and column numbers.
- (Line and column numbers are 1-based.)
-
- :param s: str - string to be printed with line and column numbers
- :param start_line: int - (optional) starting line number in s to print (default=1)
- :param end_line: int - (optional) ending line number in s to print (default=len(s))
- :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
- :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
- :param mark_spaces: str - (optional) special character to display in place of spaces
- :param mark_control: str - (optional) convert non-printing control characters to a placeholding
- character; valid values:
- - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
- - any single character string - replace control characters with given string
- - None (default) - string is displayed as-is
-
- :return: str - input string with leading line numbers and column number headers
- """
- if expand_tabs:
- s = s.expandtabs()
- if mark_control is not None:
- if mark_control == "unicode":
- tbl = str.maketrans(
- {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
- | {127: 0x2421}
- )
- eol_mark = ""
- else:
- tbl = str.maketrans(
- {c: mark_control for c in list(range(0, 32)) + [127]}
- )
- s = s.translate(tbl)
- if mark_spaces is not None and mark_spaces != " ":
- if mark_spaces == "unicode":
- tbl = str.maketrans({9: 0x2409, 32: 0x2423})
- s = s.translate(tbl)
- else:
- s = s.replace(" ", mark_spaces)
- if start_line is None:
- start_line = 1
- if end_line is None:
- end_line = len(s)
- end_line = min(end_line, len(s))
- start_line = min(max(1, start_line), end_line)
-
- if mark_control != "unicode":
- s_lines = s.splitlines()[start_line - 1 : end_line]
- else:
- s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
- if not s_lines:
- return ""
-
- lineno_width = len(str(end_line))
- max_line_len = max(len(line) for line in s_lines)
- lead = " " * (lineno_width + 1)
- if max_line_len >= 99:
- header0 = (
- lead
- + "".join(
- "{}{}".format(" " * 99, (i + 1) % 100)
- for i in range(max(max_line_len // 100, 1))
- )
- + "\n"
- )
- else:
- header0 = ""
- header1 = (
- header0
- + lead
- + "".join(
- " {}".format((i + 1) % 10)
- for i in range(-(-max_line_len // 10))
- )
- + "\n"
- )
- header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
- return (
- header1
- + header2
- + "\n".join(
- "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
- for i, line in enumerate(s_lines, start=start_line)
- )
- + "\n"
- )
diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/utils.py b/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/utils.py
deleted file mode 100644
index 8d6a6b7ea29d9edfc0a69debbfcd11cc88c98a28..0000000000000000000000000000000000000000
--- a/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/utils.py
+++ /dev/null
@@ -1,361 +0,0 @@
-import numpy as np
-import torch
-from torch import nn as nn
-from torchvision.ops.misc import FrozenBatchNorm2d
-import logging
-import h5py
-from tqdm import tqdm
-import random
-import json
-import os
-import pathlib
-
-# TODO: (yusong) this is not a good place to store this information and it does not scale. Needs to be fixed later.
-dataset_split = {
- "audiocaps": ["train", "valid", "test"],
- "audioset": ["balanced_train", "unbalanced_train", "eval"],
- "BBCSoundEffects": ["train", "test"],
- "Clotho": ["train", "test", "valid"],
- "free_to_use_sounds": ["train", "test"],
- "paramount_motion": ["train", "test"],
- "sonniss_game_effects": ["train", "test"],
- "wesoundeffects": ["train", "test"],
- "MACS": ["train", "test"],
- "freesound": ["train", "test"],
- "FSD50K": ["train", "test", "valid"],
- "fsd50k_class_label": ["train", "test", "valid"],
- "esc50": ["train", "test"],
- "audiostock": ["train", "test"],
- "freesound_no_overlap_noesc50": ["train", "test"],
- "epidemic_sound_effects": ["train", "test"],
- "VGGSound": ["train", "test"],
- "urbansound8k_class_label": ["train", "test"],
- "audioset_t5": ["balanced_train", "unbalanced_train", "eval"],
- "epidemic_sound_effects_t5": ["train", "test"],
- "WavText5K": ["train", "test"],
- "esc50_no_overlap": ["train", "test"],
- "usd8k_no_overlap": ["train", "test"],
- "fsd50k_200_class_label": ["train", "test", "valid"],
-}
-
-
-def freeze_batch_norm_2d(module, module_match={}, name=""):
- """
- Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
- itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
- returned. Otherwise, the module is walked recursively and submodules are converted in place.
-
- Args:
- module (torch.nn.Module): Any PyTorch module.
- module_match (dict): Dictionary of full module names to freeze (all if empty)
- name (str): Full module name (prefix)
-
- Returns:
- torch.nn.Module: Resulting module
-
- Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
- """
- res = module
- is_match = True
- if module_match:
- is_match = name in module_match
- if is_match and isinstance(
- module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
- ):
- res = FrozenBatchNorm2d(module.num_features)
- res.num_features = module.num_features
- res.affine = module.affine
- if module.affine:
- res.weight.data = module.weight.data.clone().detach()
- res.bias.data = module.bias.data.clone().detach()
- res.running_mean.data = module.running_mean.data
- res.running_var.data = module.running_var.data
- res.eps = module.eps
- else:
- for child_name, child in module.named_children():
- full_child_name = ".".join([name, child_name]) if name else child_name
- new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
- if new_child is not child:
- res.add_module(child_name, new_child)
- return res
-
-
-def exist(dataset_name, dataset_type):
- """
- Check whether a dataset has the given split
- """
- return dataset_type in dataset_split[dataset_name]
-
-
-def get_tar_path_from_dataset_name(
- dataset_names, dataset_types, islocal, dataset_path, proportion=1, full_dataset=None
-):
- """
- Get tar path from dataset name and type
- """
- output = []
- for n in dataset_names:
- if full_dataset is not None and n in full_dataset:
- current_dataset_types = dataset_split[n]
- else:
- current_dataset_types = dataset_types
- for s in current_dataset_types:
- tmp = []
- if islocal:
- sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json"
- if not os.path.exists(sizefilepath_):
- sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
- else:
- sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
- if not os.path.exists(sizefilepath_):
- continue
- sizes = json.load(open(sizefilepath_, "r"))
- for k in sizes.keys():
- if islocal:
- tmp.append(f"{dataset_path}/{n}/{s}/{k}")
- else:
- tmp.append(
- f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -"
- )
- if proportion != 1:
- tmp = random.sample(tmp, int(proportion * len(tmp)))
- output.append(tmp)
- return sum(output, [])
-
-
-def get_tar_path_from_txts(txt_path, islocal, proportion=1):
- """
- Get tar path from txt path
- """
- if isinstance(txt_path, (list, tuple)):
- return sum(
- [
- get_tar_path_from_txts(
- txt_path[i], islocal=islocal, proportion=proportion
- )
- for i in range(len(txt_path))
- ],
- [],
- )
- if isinstance(txt_path, str):
- with open(txt_path) as f:
- lines = f.readlines()
- if islocal:
- lines = [
- lines[i]
- .split("\n")[0]
- .replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/")
- for i in range(len(lines))
- ]
- else:
- lines = [
- lines[i].split("\n")[0].replace(".tar", ".tar -")
- for i in range(len(lines))
- ]
- if proportion != 1:
- print("Sampling tars with proportion of {}".format(proportion))
- lines = random.sample(lines, int(proportion * len(lines)))
- return lines
-
-
-def get_mix_lambda(mixup_alpha, batch_size):
- mixup_lambdas = [
- np.random.beta(mixup_alpha, mixup_alpha, 1)[0] for _ in range(batch_size)
- ]
- return np.array(mixup_lambdas).astype(np.float32)
-
-
-def do_mixup(x, mixup_lambda):
- """
- Args:
- x: (batch_size , ...)
- mixup_lambda: (batch_size,)
- Returns:
- out: (batch_size, ...)
- """
- out = (
- x.transpose(0, -1) * mixup_lambda
- + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
- ).transpose(0, -1)
- return out
-
-
-def interpolate(x, ratio):
- """Interpolate data in time domain. This is used to compensate the
- resolution reduction in downsampling of a CNN.
-
- Args:
- x: (batch_size, time_steps, classes_num)
- ratio: int, ratio to interpolate
- Returns:
- upsampled: (batch_size, time_steps * ratio, classes_num)
- """
- (batch_size, time_steps, classes_num) = x.shape
- upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
- upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
- return upsampled
-
-
-def pad_framewise_output(framewise_output, frames_num):
- """Pad framewise_output to the same length as input frames. The pad value
- is the same as the value of the last frame.
- Args:
- framewise_output: (batch_size, frames_num, classes_num)
- frames_num: int, number of frames to pad
- Outputs:
- output: (batch_size, frames_num, classes_num)
- """
- pad = framewise_output[:, -1:, :].repeat(
- 1, frames_num - framewise_output.shape[1], 1
- )
- """tensor for padding"""
-
- output = torch.cat((framewise_output, pad), dim=1)
- """(batch_size, frames_num, classes_num)"""
-
- return output
-
-
-def process_ipc(index_path, classes_num, filename):
- # load data
- logging.info("Load Data...............")
- ipc = [[] for _ in range(classes_num)]
- with h5py.File(index_path, "r") as f:
- for i in tqdm(range(len(f["target"]))):
- t_class = np.where(f["target"][i])[0]
- for t in t_class:
- ipc[t].append(i)
- print(ipc)
- np.save(filename, ipc)
- logging.info("Load Data Succeed...............")
-
-
-def save_to_dict(s, o_=None):
- # use None instead of a mutable default dict, which would be shared across calls
- if o_ is None:
- o_ = {}
- sp = s.split(": ")
- o_.update({sp[0]: float(sp[1])})
- return o_
-
-
-def get_data_from_log(txt_path):
- """
- Output dictionary from out.txt log file
- """
- with open(txt_path) as f:
- lines = f.readlines()
- val_data = {}
- train_data = {}
- train_losses = []
- train_losses_epoch = []
- for i in range(len(lines)):
- if "| INFO |" in lines[i]:
- if "Eval Epoch" in lines[i]:
- if "val_loss" in lines[i]:
- # float(regex.sub("", lines[310].split(" ")[-1]).replace(" ", ""))
- line = lines[i].split("Eval Epoch: ")[-1]
- num_epoch = int(line.split("\t")[0].split(" ")[0])
- d = {
- line.split("\t")[0]
- .split(" ")[1]
- .replace(":", ""): float(line.split("\t")[0].split(" ")[-1])
- }
- # use a fresh index name so the outer loop over `lines` is not clobbered
- for j in range(1, len(line.split("\t"))):
- d = save_to_dict(line.split("\t")[j], d)
- val_data[num_epoch] = d
- elif "Train Epoch" in lines[i]:
- num_epoch = int(lines[i].split("Train Epoch: ")[1][0])
- loss = float(lines[i].split("Loss: ")[-1].split(" (")[0])
- train_losses.append(loss)
- train_losses_epoch.append(num_epoch)
- for i in range(len(train_losses)):
- train_data[i] = {
- "num_epoch": train_losses_epoch[i],
- "train_loss": train_losses[i],
- }
- return train_data, val_data
-
-
-def save_p(obj, filename):
- import pickle
-
- try:
- from deepdiff import DeepDiff
- except ImportError:
- os.system("pip install deepdiff")
- from deepdiff import DeepDiff
- with open(filename, "wb") as file:
- pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) # highest protocol
- with open(filename, "rb") as file:
- z = pickle.load(file)
- assert (
- DeepDiff(obj, z, ignore_string_case=True) == {}
- ), "there is something wrong with the saving process"
- return
-
-
-def load_p(filename):
- import pickle
-
- with open(filename, "rb") as file:
- z = pickle.load(file)
- return z
-
-
-def save_json(data, name="data.json"):
- import json
-
- with open(name, "w") as fp:
- json.dump(data, fp)
- return
-
-
-def load_json(name):
- import json
-
- with open(name, "r") as fp:
- data = json.load(fp)
- return data
-
-
-from multiprocessing import Process, Manager, Value, Array
-from ctypes import c_wchar
-
-
-def load_class_label(path):
- # https://stackoverflow.com/questions/48004243/how-to-share-large-read-only-dictionary-list-across-processes-in-multiprocessing
- # https://stackoverflow.com/questions/45693949/storing-strings-in-a-multiprocessing-sharedctypes-array
- out = None
- if path is not None:
- if pathlib.Path(path).suffix in [".pkl", ".pickle"]:
- out = load_p(path)
- elif pathlib.Path(path).suffix in [".json", ".txt"]:
- out = load_json(path)
- elif pathlib.Path(path).suffix in [".npy", ".npz"]:
- out = np.load(path)
- elif pathlib.Path(path).suffix in [".csv"]:
- import pandas as pd
-
- out = pd.read_csv(path)
- return out
- # if out is None:
- # return None
- # else:
- # key = Array(c_wchar, '\n'.join(list(out.keys())), lock=False)
- # val = Array('i', out.values(), lock=False)
- # return (key, val)
-
-
-from torch import optim
-
-
-def get_optimizer(params, lr, betas, eps, momentum, optimizer_name):
- if optimizer_name.lower() == "adamw":
- optimizer = optim.AdamW(params, lr=lr, betas=betas, eps=eps)
- elif optimizer_name.lower() == "sgd":
- optimizer = optim.SGD(params, lr=lr, momentum=momentum)
- elif optimizer_name.lower() == "adam":
- optimizer = optim.Adam(params, lr=lr, betas=betas, eps=eps)
- else:
- raise ValueError("optimizer name is not correct")
- return optimizer
diff --git a/spaces/Basit12345/basit123/app.py b/spaces/Basit12345/basit123/app.py
deleted file mode 100644
index ac24b219d3a40095cd7b1a172f8e19fe1fb1fb74..0000000000000000000000000000000000000000
--- a/spaces/Basit12345/basit123/app.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-
-import openai
-import gradio as gr
-
-# read the key from the environment instead of hard-coding a secret in source
-openai.api_key = os.environ.get("OPENAI_API_KEY")
-
-messages = [{"role": "system", "content": "You are a financial experts that specializes in real estate investment and negotiation"}]
-
-
-
-
-def CustomChatGPT(user_input):
- messages.append({"role": "user", "content": user_input})
- response = openai.ChatCompletion.create(
- model = "gpt-3.5-turbo",
- messages = messages
- )
- ChatGPT_reply = response["choices"][0]["message"]["content"]
- messages.append({"role": "assistant", "content": ChatGPT_reply})
- return ChatGPT_reply
-
-iface = gr.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "basit cyberwala gpt")
-iface.launch()
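-# Note: run with the key exported, e.g. (assumption): OPENAI_API_KEY=sk-... python app.py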
-
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py
deleted file mode 100644
index 898644755cbbf9a8d4df562663114a7eb7e11fd1..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import railroad
-import pyparsing
-import typing
-from typing import (
- List,
- NamedTuple,
- Generic,
- TypeVar,
- Dict,
- Callable,
- Set,
- Iterable,
-)
-from jinja2 import Template
-from io import StringIO
-import inspect
-
-
-jinja2_template_source = """\
-
-
-
- {% if not head %}
-
- {% else %}
- {{ head | safe }}
- {% endif %}
-
-
-{{ body | safe }}
-{% for diagram in diagrams %}
-
-
{{ diagram.title }}
-
{{ diagram.text }}
-
- {{ diagram.svg }}
-
-
-{% endfor %}
-
-
-"""
-
-template = Template(jinja2_template_source)
-
-# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
-NamedDiagram = NamedTuple(
- "NamedDiagram",
- [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
-)
-"""
-A simple structure for associating a name with a railroad diagram
-"""
-
-T = TypeVar("T")
-
-
-class EachItem(railroad.Group):
- """
- Custom railroad item to compose a:
- - Group containing a
- - OneOrMore containing a
- - Choice of the elements in the Each
- with the group label indicating that all must be matched
- """
-
- all_label = "[ALL]"
-
- def __init__(self, *items):
- choice_item = railroad.Choice(len(items) - 1, *items)
- one_or_more_item = railroad.OneOrMore(item=choice_item)
- super().__init__(one_or_more_item, label=self.all_label)
-
-
-class AnnotatedItem(railroad.Group):
- """
- Simple subclass of Group that creates an annotation label
- """
-
- def __init__(self, label: str, item):
- super().__init__(item=item, label="[{}]".format(label) if label else label)
-
-
-class EditablePartial(Generic[T]):
- """
- Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
- constructed.
- """
-
- # We need this here because the railroad constructors actually transform the data, so can't be called until the
- # entire tree is assembled
-
- def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- @classmethod
- def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
- """
- If you call this function in the same way that you would call the constructor, it will store the arguments
- as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
- """
- return EditablePartial(func=func, args=list(args), kwargs=kwargs)
-
- @property
- def name(self):
- return self.kwargs["name"]
-
- def __call__(self) -> T:
- """
- Evaluate the partial and return the result
- """
- args = self.args.copy()
- kwargs = self.kwargs.copy()
-
- # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
- # args=['list', 'of', 'things'])
- arg_spec = inspect.getfullargspec(self.func)
- if arg_spec.varargs in self.kwargs:
- args += kwargs.pop(arg_spec.varargs)
-
- return self.func(*args, **kwargs)
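- # The varargs splice above is what lets callers pass, e.g.,
- # EditablePartial.from_call(railroad.Sequence, items=[...]); railroad.Sequence declares *items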
-
-
-def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
- """
- Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
- :param kwargs: kwargs to be passed in to the template
- """
- data = []
- for diagram in diagrams:
- if diagram.diagram is None:
- continue
- io = StringIO()
- diagram.diagram.writeSvg(io.write)
- title = diagram.name
- if diagram.index == 0:
- title += " (root)"
- data.append({"title": title, "text": "", "svg": io.getvalue()})
-
- return template.render(diagrams=data, **kwargs)
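-
-# Sketch of typical use (hypothetical output path and parser element):
-# with open("diagrams.html", "w", encoding="utf-8") as fp:
-#     fp.write(railroad_to_html(to_railroad(expr)))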
-
-
-def resolve_partial(partial: "EditablePartial[T]") -> T:
- """
- Recursively resolves a collection of Partials into whatever type they are
- """
- if isinstance(partial, EditablePartial):
- partial.args = resolve_partial(partial.args)
- partial.kwargs = resolve_partial(partial.kwargs)
- return partial()
- elif isinstance(partial, list):
- return [resolve_partial(x) for x in partial]
- elif isinstance(partial, dict):
- return {key: resolve_partial(x) for key, x in partial.items()}
- else:
- return partial
-
-
-def to_railroad(
- element: pyparsing.ParserElement,
- diagram_kwargs: typing.Optional[dict] = None,
- vertical: int = 3,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> List[NamedDiagram]:
- """
- Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
- creation if you want to access the Railroad tree before it is converted to HTML
- :param element: base element of the parser being diagrammed
- :param diagram_kwargs: kwargs to pass to the Diagram() constructor
- :param vertical: (optional) int, the limit at which the number of alternatives should be
- shown vertically instead of horizontally
- :param show_results_names: bool to indicate whether results name annotations should be
- included in the diagram
- :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
- surrounding box
- """
- # Convert the whole tree underneath the root
- lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
- _to_diagram_element(
- element,
- lookup=lookup,
- parent=None,
- vertical=vertical,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- root_id = id(element)
- # Convert the root if it hasn't been already
- if root_id in lookup:
- if not element.customName:
- lookup[root_id].name = ""
- lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
-
- # Now that we're finished, we can convert from intermediate structures into Railroad elements
- diags = list(lookup.diagrams.values())
- if len(diags) > 1:
- # collapse out duplicate diags with the same name
- seen = set()
- deduped_diags = []
- for d in diags:
- # don't extract SkipTo elements, they are uninformative as subdiagrams
- if d.name == "...":
- continue
- if d.name is not None and d.name not in seen:
- seen.add(d.name)
- deduped_diags.append(d)
- resolved = [resolve_partial(partial) for partial in deduped_diags]
- else:
- # special case - if just one diagram, always display it, even if
- # it has no name
- resolved = [resolve_partial(partial) for partial in diags]
- return sorted(resolved, key=lambda diag: diag.index)
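-
-# Minimal sketch (assuming a trivial grammar):
-# import pyparsing as pp
-# pair = (pp.Word(pp.alphas) + pp.Word(pp.nums)).set_name("pair")
-# diagrams = to_railroad(pair)  # -> List[NamedDiagram], sorted by index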
-
-
-def _should_vertical(
- specification: int, exprs: Iterable[pyparsing.ParserElement]
-) -> bool:
- """
- Returns true if we should return a vertical list of elements
- """
- if specification is None:
- return False
- else:
- return len(_visible_exprs(exprs)) >= specification
-
-
-class ElementState:
- """
- State recorded for an individual pyparsing Element
- """
-
- # Note: this should be a dataclass, but we have to support Python 3.5
- def __init__(
- self,
- element: pyparsing.ParserElement,
- converted: EditablePartial,
- parent: EditablePartial,
- number: int,
- name: str = None,
- parent_index: typing.Optional[int] = None,
- ):
- #: The pyparsing element that this represents
- self.element: pyparsing.ParserElement = element
- #: The name of the element
- self.name: typing.Optional[str] = name
- #: The output Railroad element in an unconverted state
- self.converted: EditablePartial = converted
- #: The parent Railroad element, which we store so that we can extract this if it's duplicated
- self.parent: EditablePartial = parent
- #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
- self.number: int = number
- #: The index of this inside its parent
- self.parent_index: typing.Optional[int] = parent_index
- #: If true, we should extract this out into a subdiagram
- self.extract: bool = False
- #: If true, all of this element's children have been filled out
- self.complete: bool = False
-
- def mark_for_extraction(
- self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
- ):
- """
- Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
- :param el_id: id of the element
- :param state: element/diagram state tracker
- :param name: name to use for this element's text
- :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
- root element when we know we're finished
- """
- self.extract = True
-
- # Set the name
- if not self.name:
- if name:
- # Allow forcing a custom name
- self.name = name
- elif self.element.customName:
- self.name = self.element.customName
- else:
- self.name = ""
-
- # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
- # to be added
- # Also, if this is just a string literal etc, don't bother extracting it
- if force or (self.complete and _worth_extracting(self.element)):
- state.extract_into_diagram(el_id)
-
-
-class ConverterState:
- """
- Stores some state that persists between recursions into the element tree
- """
-
- def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
- #: A dictionary mapping ParserElements to state relating to them
- self._element_diagram_states: Dict[int, ElementState] = {}
- #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
- self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
- #: The index of the next unnamed element
- self.unnamed_index: int = 1
- #: The index of the next element. This is used for sorting
- self.index: int = 0
- #: Shared kwargs that are used to customize the construction of diagrams
- self.diagram_kwargs: dict = diagram_kwargs or {}
- self.extracted_diagram_names: Set[str] = set()
-
- def __setitem__(self, key: int, value: ElementState):
- self._element_diagram_states[key] = value
-
- def __getitem__(self, key: int) -> ElementState:
- return self._element_diagram_states[key]
-
- def __delitem__(self, key: int):
- del self._element_diagram_states[key]
-
- def __contains__(self, key: int):
- return key in self._element_diagram_states
-
- def generate_unnamed(self) -> int:
- """
- Generate a number used in the name of an otherwise unnamed diagram
- """
- self.unnamed_index += 1
- return self.unnamed_index
-
- def generate_index(self) -> int:
- """
- Generate a number used to index a diagram
- """
- self.index += 1
- return self.index
-
- def extract_into_diagram(self, el_id: int):
- """
- Used when we encounter the same token twice in the same tree. When this
- happens, we replace all instances of that token with a terminal, and
- create a new subdiagram for the token
- """
- position = self[el_id]
-
- # Replace the original definition of this element with a regular block
- if position.parent:
- ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
- if "item" in position.parent.kwargs:
- position.parent.kwargs["item"] = ret
- elif "items" in position.parent.kwargs:
- position.parent.kwargs["items"][position.parent_index] = ret
-
- # If the element we're extracting is a group, skip to its content but keep the title
- if position.converted.func == railroad.Group:
- content = position.converted.kwargs["item"]
- else:
- content = position.converted
-
- self.diagrams[el_id] = EditablePartial.from_call(
- NamedDiagram,
- name=position.name,
- diagram=EditablePartial.from_call(
- railroad.Diagram, content, **self.diagram_kwargs
- ),
- index=position.number,
- )
-
- del self[el_id]
-
-
-def _worth_extracting(element: pyparsing.ParserElement) -> bool:
- """
- Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
- themselves have children, then it's complex enough to extract
- """
- children = element.recurse()
- return any(child.recurse() for child in children)
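-
-# e.g. a leaf such as pyparsing.Word(pyparsing.alphas) recurses to no children, so it is never extracted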
-
-
-def _apply_diagram_item_enhancements(fn):
- """
- decorator to ensure enhancements to a diagram item (such as results name annotations)
- get applied on return from _to_diagram_element (we do this since there are several
- returns in _to_diagram_element)
- """
-
- def _inner(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
- ) -> typing.Optional[EditablePartial]:
-
- ret = fn(
- element,
- parent,
- lookup,
- vertical,
- index,
- name_hint,
- show_results_names,
- show_groups,
- )
-
- # apply annotation for results name, if present
- if show_results_names and ret is not None:
- element_results_name = element.resultsName
- if element_results_name:
- # add "*" to indicate if this is a "list all results" name
- element_results_name += "" if element.modalResults else "*"
- ret = EditablePartial.from_call(
- railroad.Group, item=ret, label=element_results_name
- )
-
- return ret
-
- return _inner
-
-
-def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
- non_diagramming_exprs = (
- pyparsing.ParseElementEnhance,
- pyparsing.PositionToken,
- pyparsing.And._ErrorStop,
- )
- return [
- e
- for e in exprs
- if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
- ]
-
-
-@_apply_diagram_item_enhancements
-def _to_diagram_element(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> typing.Optional[EditablePartial]:
- """
- Recursively converts a PyParsing Element to a railroad Element
- :param lookup: The shared converter state that keeps track of useful things
- :param index: The index of this element within the parent
- :param parent: The parent of this element in the output tree
- :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
- it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
- do so
- :param name_hint: If provided, this will override the generated name
- :param show_results_names: bool flag indicating whether to add annotations for results names
- :param show_groups: bool flag indicating whether to show groups using bounding box
- :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
- """
- exprs = element.recurse()
- name = name_hint or element.customName or element.__class__.__name__
-
- # Python's id() is used to provide a unique identifier for elements
- el_id = id(element)
-
- element_results_name = element.resultsName
-
- # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
- if not element.customName:
- if isinstance(
- element,
- (
- # pyparsing.TokenConverter,
- # pyparsing.Forward,
- pyparsing.Located,
- ),
- ):
- # However, if this element has a useful custom name, and its child does not, we can pass it on to the child
- if exprs:
- if not exprs[0].customName:
- propagated_name = name
- else:
- propagated_name = None
-
- return _to_diagram_element(
- element.expr,
- parent=parent,
- lookup=lookup,
- vertical=vertical,
- index=index,
- name_hint=propagated_name,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- # If the element isn't worth extracting, we always treat it as the first time we see it
- if _worth_extracting(element):
- if el_id in lookup:
- # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
- # so we have to extract it into a new diagram.
- looked_up = lookup[el_id]
- looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
- ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
- return ret
-
- elif el_id in lookup.diagrams:
- # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
- # just put in a marker element that refers to the sub-diagram
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
- return ret
-
- # Recursively convert child elements
- # Here we find the most relevant Railroad element for matching pyparsing Element
- # We use ``items=[]`` here to hold the place for where the child elements will go once created
- if isinstance(element, pyparsing.And):
- # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
- # (all will have the same name, and resultsName)
- if not exprs:
- return None
- if len(set((e.name, e.resultsName) for e in exprs)) == 1:
- ret = EditablePartial.from_call(
- railroad.OneOrMore, item="", repeat=str(len(exprs))
- )
- elif _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Stack, items=[])
- else:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
- if not exprs:
- return None
- if _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
- else:
- ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
- elif isinstance(element, pyparsing.Each):
- if not exprs:
- return None
- ret = EditablePartial.from_call(EachItem, items=[])
- elif isinstance(element, pyparsing.NotAny):
- ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
- elif isinstance(element, pyparsing.FollowedBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
- elif isinstance(element, pyparsing.PrecededBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
- elif isinstance(element, pyparsing.Group):
- if show_groups:
- ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
- else:
- ret = EditablePartial.from_call(railroad.Group, label="", item="")
- elif isinstance(element, pyparsing.TokenConverter):
- ret = EditablePartial.from_call(
- AnnotatedItem, label=type(element).__name__.lower(), item=""
- )
- elif isinstance(element, pyparsing.Opt):
- ret = EditablePartial.from_call(railroad.Optional, item="")
- elif isinstance(element, pyparsing.OneOrMore):
- ret = EditablePartial.from_call(railroad.OneOrMore, item="")
- elif isinstance(element, pyparsing.ZeroOrMore):
- ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
- elif isinstance(element, pyparsing.Group):
- ret = EditablePartial.from_call(
- railroad.Group, item=None, label=element_results_name
- )
- elif isinstance(element, pyparsing.Empty) and not element.customName:
- # Skip unnamed "Empty" elements
- ret = None
- elif len(exprs) > 1:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif len(exprs) > 0 and not element_results_name:
- ret = EditablePartial.from_call(railroad.Group, item="", label=name)
- else:
- terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
- ret = terminal
-
- if ret is None:
- return
-
- # Indicate this element's position in the tree so we can extract it if necessary
- lookup[el_id] = ElementState(
- element=element,
- converted=ret,
- parent=parent,
- parent_index=index,
- number=lookup.generate_index(),
- )
- if element.customName:
- lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
-
- i = 0
- for expr in exprs:
- # Add a placeholder index in case we have to extract the child before we even add it to the parent
- if "items" in ret.kwargs:
- ret.kwargs["items"].insert(i, None)
-
- item = _to_diagram_element(
- expr,
- parent=ret,
- lookup=lookup,
- vertical=vertical,
- index=i,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- # Some elements don't need to be shown in the diagram
- if item is not None:
- if "item" in ret.kwargs:
- ret.kwargs["item"] = item
- elif "items" in ret.kwargs:
- # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
- ret.kwargs["items"][i] = item
- i += 1
- elif "items" in ret.kwargs:
- # If we're supposed to skip this element, remove it from the parent
- del ret.kwargs["items"][i]
-
- # If all of this item's children are None, skip this item
- if ret and (
- ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
- or ("item" in ret.kwargs and ret.kwargs["item"] is None)
- ):
- ret = EditablePartial.from_call(railroad.Terminal, name)
-
- # Mark this element as "complete", ie it has all of its children
- if el_id in lookup:
- lookup[el_id].complete = True
-
- if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
- lookup.extract_into_diagram(el_id)
- if ret is not None:
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
-
- return ret
diff --git a/spaces/Billyosoro/ESRGAN/scripts/generate_meta_info_pairdata.py b/spaces/Billyosoro/ESRGAN/scripts/generate_meta_info_pairdata.py
deleted file mode 100644
index 76dce7e41c803a8055f3627cccb98deb51419b09..0000000000000000000000000000000000000000
--- a/spaces/Billyosoro/ESRGAN/scripts/generate_meta_info_pairdata.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import argparse
-import glob
-import os
-
-
-def main(args):
- # scan images
- img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*')))
- img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*')))
-
- assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got '
- f'{len(img_paths_gt)} and {len(img_paths_lq)}.')
-
- # use a context manager so the meta-info file is closed even if an error occurs
- with open(args.meta_info, 'w') as txt_file:
- for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq):
- # get the relative paths
- img_name_gt = os.path.relpath(img_path_gt, args.root[0])
- img_name_lq = os.path.relpath(img_path_lq, args.root[1])
- print(f'{img_name_gt}, {img_name_lq}')
- txt_file.write(f'{img_name_gt}, {img_name_lq}\n')
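-
-# Each line of the meta-info file pairs a GT image with its LQ counterpart, e.g. (hypothetical names):
-# DIV2K_train_HR_sub/0001_s001.png, DIV2K_train_LR_bicubic_X4_sub/0001_s001.png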
-
-
-if __name__ == '__main__':
- """This script is used to generate meta info (txt file) for paired images.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--input',
- nargs='+',
- default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'],
- help='Input folder, should be [gt_folder, lq_folder]')
- parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root; defaults to the parent folder of each input')
- parser.add_argument(
- '--meta_info',
- type=str,
- default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt',
- help='txt path for meta info')
- args = parser.parse_args()
-
- assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder'
- assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder'
- os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
- for i in range(2):
- if args.input[i].endswith('/'):
- args.input[i] = args.input[i][:-1]
- if args.root[i] is None:
- args.root[i] = os.path.dirname(args.input[i])
-
- main(args)
diff --git a/spaces/CVPR/Demo-Balanced-MSE/app.py b/spaces/CVPR/Demo-Balanced-MSE/app.py
deleted file mode 100644
index 1483354667724459646133a1cbeb610fd99795a4..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Demo-Balanced-MSE/app.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import gradio as gr
-import matplotlib.pyplot as plt
-import torch
-import seaborn as sns
-import pandas as pd
-import os
-import os.path as osp
-import ffmpeg
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.loss import _Loss
-from torch.utils.data import Dataset, DataLoader
-
-NUM_PER_BUCKET = 1000
-NOISE_SIGMA = 1
-Y_UB = 10
-Y_LB = 0
-K = 1
-B = 0
-NUM_SEG = 5
-NUM_EPOCHS = 100
-PRINT_FREQ = NUM_EPOCHS // 20
-NUM_TRAIN_SAMPLES = NUM_PER_BUCKET * NUM_SEG
-BATCH_SIZE = 256
-
-
-def make_dataframe(x, y, method=None):
- x = list(x[:, 0].detach().numpy())
- y = list(y[:, 0].detach().numpy())
- if method is not None:
- method = [method for _ in range(len(x))]
- df = pd.DataFrame({'x': x, 'y': y, 'Method': method})
- else:
- df = pd.DataFrame({'x': x, 'y': y})
- return df
-
-
-Y_demo = torch.linspace(Y_LB, Y_UB, 2).unsqueeze(-1)
-X_demo = (Y_demo - B) / K
-
-df_oracle = make_dataframe(X_demo, Y_demo, 'Oracle')
-
-
-def prepare_data(sel_num):
- interval = (Y_UB - Y_LB) / NUM_SEG
- all_x, all_y = [], []
- prob = []
- for i in range(NUM_SEG):
- uniform_y_distribution = torch.distributions.Uniform(Y_UB - (i + 1) * interval, Y_UB - i * interval)
- y_uniform = uniform_y_distribution.sample((NUM_TRAIN_SAMPLES, 1))[:sel_num[i]]
-
- noise_distribution = torch.distributions.Normal(loc=0, scale=NOISE_SIGMA)
- noise = noise_distribution.sample((NUM_TRAIN_SAMPLES, 1))[:sel_num[i]]
- y_uniform_oracle = y_uniform - noise
-
- x_uniform = (y_uniform_oracle - B) / K
- all_x += x_uniform
- all_y += y_uniform
- prob += [torch.tensor(sel_num[i]).float() for _ in range(sel_num[i])]
-
- all_x = torch.stack(all_x)
- all_y = torch.stack(all_y)
- prob = torch.stack(prob)
- return all_x, all_y, prob
-
-
-def unzip_dataloader(training_loader):
- all_x = []
- all_y = []
- for data, label, _ in training_loader:
- all_x.append(data)
- all_y.append(label)
- all_x = torch.cat(all_x)
- all_y = torch.cat(all_y)
- return all_x, all_y
-
-
-def train(train_loader, training_df, training_bundle, num_epochs):
- visualize_training_process(training_df, training_bundle, -1)
- for epoch in range(num_epochs):
- for model, optimizer, scheduler, criterion, criterion_name in training_bundle:
- model.train()
- for data, target, prob in train_loader:
- optimizer.zero_grad()
- pred = model(data)
- if criterion_name == 'Reweight':
- loss = criterion(pred, target, prob)
- else:
- loss = criterion(pred, target)
- loss.backward()
- optimizer.step()
- scheduler.step()
- if (epoch + 1) % PRINT_FREQ == 0:
- visualize_training_process(training_df, training_bundle, epoch)
- visualize_training_process(training_df, training_bundle, num_epochs-1, final=True)
-
-
-def visualize_training_process(training_df, training_bundle, epoch, final=False):
- df = df_oracle
- for model, optimizer, scheduler, criterion, criterion_name in training_bundle:
- model.eval()
- y = model(X_demo)
- # DataFrame.append was deprecated and later removed from pandas; pd.concat is the supported replacement
- df = pd.concat([df, make_dataframe(X_demo, y, criterion_name)], ignore_index=True)
- visualize(training_df, df, 'train_log/{:05d}.png'.format(epoch + 1), fast=True, epoch=epoch)
- if final:
- visualize(training_df, df, 'regression_result.png', fast=False)
-
-
-def make_video():
- (
- ffmpeg
- .input('train_log/*.png', pattern_type='glob', framerate=3)
- .output('movie.mp4')
- .run()
- )
-
-
-class ReweightL2(_Loss):
- def __init__(self, reweight='inverse'):
- super(ReweightL2, self).__init__()
- self.reweight = reweight
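- # 'inverse' weights each sample's squared error by 1/frequency of its label bucket;
- # 'sqrt_inv' softens this to 1/sqrt(frequency)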
-
- def forward(self, pred, target, prob):
- reweight = self.reweight
- if reweight == 'inverse':
- inv_prob = prob.pow(-1)
- elif reweight == 'sqrt_inv':
- inv_prob = prob.pow(-0.5)
- else:
- raise NotImplementedError
- inv_prob = inv_prob / inv_prob.sum()
- loss = F.mse_loss(pred, target, reduction='none').sum(-1) * inv_prob
- loss = loss.sum()
- return loss
-
-
-class LinearModel(nn.Module):
- def __init__(self, input_dim, output_dim):
- super(LinearModel, self).__init__()
- self.mlp = nn.Sequential(
- nn.Linear(input_dim, output_dim),
- )
-
- def forward(self, x):
- x = self.mlp(x)
- return x
-
-
-def prepare_model():
- model = LinearModel(input_dim=1, output_dim=1)
- optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
- scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=NUM_EPOCHS)
- return model, optimizer, scheduler
-
-
-class BMCLoss(_Loss):
- def __init__(self):
- super(BMCLoss, self).__init__()
- self.noise_sigma = NOISE_SIGMA
-
- def forward(self, pred, target):
- pred = pred.reshape(-1, 1)
- target = target.reshape(-1, 1)
- noise_var = self.noise_sigma ** 2
- loss = bmc_loss(pred, target, noise_var)
- return loss
-
-
-def bmc_loss(pred, target, noise_var):
- logits = - 0.5 * (pred - target.T).pow(2) / noise_var
- loss = F.cross_entropy(logits, torch.arange(pred.shape[0]))
-
- return loss * (2 * noise_var)
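-
-# Sanity check (hypothetical values): with pred == target == torch.zeros(4, 1) and noise_var = 1.0,
-# the logits matrix is all zeros and bmc_loss reduces to 2 * log(4).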
-
-
-def regress(train_loader, training_df):
- training_bundle = []
- criterions = {
- 'MSE': torch.nn.MSELoss(),
- 'Reweight': ReweightL2(),
- 'Balanced MSE': BMCLoss(),
- }
- for criterion_name in criterions:
- criterion = criterions[criterion_name]
- model, optimizer, scheduler = prepare_model()
- training_bundle.append((model, optimizer, scheduler, criterion, criterion_name))
- train(train_loader, training_df, training_bundle, NUM_EPOCHS)
-
-
-class DummyDataset(Dataset):
- def __init__(self, inputs, targets, prob):
- self.inputs = inputs
- self.targets = targets
- self.prob = prob
-
- def __getitem__(self, index):
- return self.inputs[index], self.targets[index], self.prob[index]
-
- def __len__(self):
- return len(self.inputs)
-
-
-def visualize(training_df, df, save_path, fast=False, epoch=None):
- if fast:
- f = plt.figure(figsize=(3, 3))
- g = f.add_subplot(111)
- g_line = sns.lineplot(data=df, x='x', y='y', hue='Method', ax=g, estimator=None, ci=None)
- plt.xlim((Y_LB - B) / K, (Y_UB - B) / K)
- plt.ylim(Y_LB, Y_UB)
- else:
- g = sns.jointplot(data=training_df, x='x', y='y', color='#003ea1', alpha=0.1, linewidths=0, s=50,
- marginal_kws=dict(bins=torch.linspace(Y_LB, Y_UB, steps=NUM_SEG + 1)),
- xlim=((Y_LB - B) / K, (Y_UB - B) / K),
- ylim=(Y_LB, Y_UB),
- space=0.1,
- height=5,
- ratio=2,
- estimator=None, ci=None,
- legend=False,
- )
- g.ax_marg_x.remove()
- g_line = sns.lineplot(data=df, x='x', y='y', hue='Method', ax=g.ax_joint, estimator=None, ci=None)
- if epoch is not None:
- g_line.legend(loc='upper left', title="Epoch {:03d}".format(epoch+1))
- else:
- g_line.legend(loc='upper left')
- plt.gca().axes.set_xlabel(r'$x$')
- plt.gca().axes.set_ylabel(r'$y$')
-
- plt.savefig(save_path, bbox_inches='tight', dpi=200)
- plt.close()
-
-
-def clean_up_logs():
- if not osp.exists('train_log'):
- os.mkdir('train_log')
- for f in os.listdir('train_log'):
- os.remove(osp.join('train_log', f))
- for f in ['regression_result.png', 'training_data.png', 'movie.mp4']:
- if osp.isfile(f):
- os.remove(f)
-
-
-def run(num1, num2, num3, num4, num5, random_seed, mode):
- sel_num = [num1, num2, num3, num4, num5]
- sel_num = [int(num / 100 * NUM_PER_BUCKET) for num in sel_num]
- torch.manual_seed(int(random_seed))
- all_x, all_y, prob = prepare_data(sel_num)
- train_loader = DataLoader(DummyDataset(all_x, all_y, prob), BATCH_SIZE, shuffle=True)
- training_df = make_dataframe(all_x, all_y)
-
- clean_up_logs()
- if mode == 0:
- visualize(training_df, df_oracle, 'training_data.png')
- if mode == 1:
- regress(train_loader, training_df)
- make_video()
- if mode == 0:
- text = "Press \"Start Regressing\" if your are happy with the training data. Regression takes ~30s."
- else:
- text = "Press \"Prepare Training Data\" before moving the sliders. You may also change the random seed."
- training_data_plot = 'training_data.png' if mode == 0 else None
- output = 'regression_result.png' if mode == 1 else None
- video = "movie.mp4" if mode == 1 else None
- return training_data_plot, output, video, text
-
-
-if __name__ == '__main__':
- iface = gr.Interface(
- fn=run,
- inputs=[
- gr.inputs.Slider(0, 100, default=20, step=0.1, label='Label percentage in [8, 10)'),
- gr.inputs.Slider(0, 100, default=20, step=0.1, label='Label percentage in [6, 8)'),
- gr.inputs.Slider(0, 100, default=20, step=0.1, label='Label percentage in [4, 6)'),
- gr.inputs.Slider(0, 100, default=20, step=0.1, label='Label percentage in [2, 4)'),
- gr.inputs.Slider(0, 100, default=20, step=0.1, label='Label percentage in [0, 2)'),
- gr.inputs.Number(default=0, label='Random Seed', optional=False),
- gr.inputs.Radio(['Prepare Training Data', 'Start Regressing!'],
- type="index", default=None, label='Mode', optional=False),
- ],
- outputs=[
- gr.outputs.Image(type="file", label="Training data"),
- gr.outputs.Image(type="file", label="Regression result"),
- gr.outputs.Video(type='mp4', label='Training process'),
- gr.outputs.Textbox(type="auto", label='What\' s next?')
- ],
- live=True,
- allow_flagging='never',
- title="Balanced MSE for Imbalanced Visual Regression [CVPR 2022]",
- description="Welcome to the demo of Balanced MSE ⚖. In this demo, we will work on a simple task: imbalanced linear regression. "
- "To get started, move the sliders 🎚 to create your training data "
- "or click the examples 📕 at the bottom of the page 👇👇",
- examples=[
- [0.1, 0.8, 6.4, 51.2, 100, 0, 'Prepare Training Data'],
- [1, 10, 100, 10, 1, 0, 'Prepare Training Data'],
- ],
- css=".output-image, .image-preview {height: 500px !important}",
- article="