Dataset columns: signature (string, 8–3.44k chars), body (string, 0–1.41M chars), docstring (string, 1–122k chars), id (string, 5–17 chars).
def decrypt(*args, **kwargs):
try:<EOL><INDENT>return legacy_decrypt(*args, **kwargs)<EOL><DEDENT>except (NotYetValid, Expired) as e:<EOL><INDENT>raise e<EOL><DEDENT>except (Error, ValueError) as e:<EOL><INDENT>return spec_compliant_decrypt(*args, **kwargs)<EOL><DEDENT>
Decrypts legacy or spec-compliant JOSE token. First attempts to decrypt the token in a legacy mode (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19). If it is not a valid legacy token then attempts to decrypt it in a spec-compliant way (http://tools.ietf.org/html/rfc7519)
f1673:m14
def sign(claims, jwk, add_header=None, alg='<STR_LIT>'):
(hash_fn, _), mod = JWA[alg]<EOL>header = dict(list((add_header or {}).items()) + [(HEADER_ALG, alg)])<EOL>header, payload = list(map(b64encode_url, list(map(json_encode, (header, claims)))))<EOL>sig = b64encode_url(hash_fn(_jws_hash_str(header, payload), jwk['<STR_LIT:k>'],<EOL>mod=mod))<EOL>return JWS(header, payload, sig)<EOL>
Signs the given claims and produces a :class:`~jose.JWS` :param claims: A `dict` representing the claims for this :class:`~jose.JWS`. :param jwk: A `dict` representing the JWK to be used for signing of the :class:`~jose.JWS`. This parameter is algorithm-specific. :parameter add_header: Additional items to be added to the header. Additional headers *will* be authenticated. :parameter alg: The algorithm to use to produce the signature. :rtype: :class:`~jose.JWS`
f1673:m15
def verify(jws, jwk, alg, validate_claims=True, expiry_seconds=None):
header, payload, sig = list(map(b64decode_url, jws))<EOL>header = json_decode(header)<EOL>if alg != header[HEADER_ALG]:<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>(_, verify_fn), mod = JWA[header[HEADER_ALG]]<EOL>if not verify_fn(_jws_hash_str(jws.header, jws.payload),<EOL>jwk['<STR_LIT:k>'], sig, mod=mod):<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>claims = json_decode(b64decode_url(jws.payload))<EOL>_validate(claims, validate_claims, expiry_seconds)<EOL>return JWT(header, claims)<EOL>
Verifies the given :class:`~jose.JWS` :param jws: The :class:`~jose.JWS` to be verified. :param jwk: A `dict` representing the JWK to use for verification. This parameter is algorithm-specific. :param alg: The algorithm to verify the signature with. :param validate_claims: A `bool` indicating whether or not the `exp`, `iat` and `nbf` claims should be validated. Defaults to `True`. :param expiry_seconds: An `int` containing the JWT expiry in seconds, used when evaluating the `iat` claim. Defaults to `None`, which disables `iat` claim validation. :rtype: :class:`~jose.JWT` :raises: :class:`~jose.Expired` if the JWT has expired :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid :raises: :class:`~jose.Error` if there is an error decrypting the JWE
f1673:m16
def b64decode_url(istr):
istr = encode_safe(istr)<EOL>try:<EOL><INDENT>return urlsafe_b64decode(istr + '<STR_LIT:=>' * (<NUM_LIT:4> - (len(istr) % <NUM_LIT:4>)))<EOL><DEDENT>except TypeError as e:<EOL><INDENT>raise Error('<STR_LIT>' % (e))<EOL><DEDENT>
JWT Tokens may be truncated without the usual trailing padding '=' symbols. Compensate by padding to the nearest 4 bytes.
f1673:m17
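For illustration, a minimal self-contained sketch of the same pad-on-decode trick used by b64encode_url/b64decode_url, using only the standard library (the helper names here are hypothetical); `-len(s) % 4` yields 0–3, so no padding is added when the length is already a multiple of 4:

from base64 import urlsafe_b64decode, urlsafe_b64encode

def encode_url(data: bytes) -> bytes:
    # drop the trailing '=' padding, as JWT tokens are transmitted without it
    return urlsafe_b64encode(data).rstrip(b'=')

def decode_url(data: bytes) -> bytes:
    # restore the padding to the nearest multiple of 4 before decoding
    return urlsafe_b64decode(data + b'=' * (-len(data) % 4))

assert decode_url(encode_url(b'{"alg":"HS256"}')) == b'{"alg":"HS256"}'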
def b64encode_url(istr):
return urlsafe_b64encode(encode_safe(istr)).rstrip('<STR_LIT:=>')<EOL>
JWT Tokens are transmitted without the usual trailing padding '=' symbols. Strip the padding after encoding; it is restored on decode.
f1673:m18
def _validate(claims, validate_claims, expiry_seconds):
if not validate_claims:<EOL><INDENT>return<EOL><DEDENT>now = time()<EOL>try:<EOL><INDENT>expiration_time = claims[CLAIM_EXPIRATION_TIME]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>_check_expiration_time(now, expiration_time)<EOL><DEDENT>try:<EOL><INDENT>issued_at = claims[CLAIM_ISSUED_AT]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if expiry_seconds is not None:<EOL><INDENT>_check_expiration_time(now, issued_at + expiry_seconds)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>not_before = claims[CLAIM_NOT_BEFORE]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>_check_not_before(now, not_before)<EOL><DEDENT>
Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided.
f1673:m35
def __getitem__(self, key):
if key in self._impl:<EOL><INDENT>return self._impl[key]<EOL><DEDENT>enc, hash = self._compound_from_key(key)<EOL>return self._impl[enc], self._impl[hash]<EOL>
Derive implementation(s) from key. If key is a compound <encryption>(-|+)<hash> then it will return a tuple of implementations. Each implementation is a tuple in the following format: - for hash algorithms ((<sign function>, <verify function>), <mod>) - for encryption algorithms ((<encrypt function>, <decrypt function>), <key length>)
f1673:c3:m0
def calculate_mean_vectors(X, y):
return [np.mean(X[y == cl, :], axis=<NUM_LIT:0>) for cl in np.unique(y)]<EOL>
Calculates the mean samples per class Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- mean_vectors : array-like, shape (k, n) The mean sample of each of the k classes.
f1680:m0
def calculate_within_class_scatter_matrix(X, y):
mean_vectors = calculate_mean_vectors(X, y)<EOL>n_features = X.shape[<NUM_LIT:1>]<EOL>Sw = np.zeros((n_features, n_features))<EOL>for cl, m in zip(np.unique(y), mean_vectors):<EOL><INDENT>Si = np.zeros((n_features, n_features))<EOL>m = m.reshape(n_features, <NUM_LIT:1>)<EOL>for x in X[y == cl, :]:<EOL><INDENT>v = x.reshape(n_features, <NUM_LIT:1>) - m<EOL>Si += v @ v.T<EOL><DEDENT>Sw += Si<EOL><DEDENT>return Sw<EOL>
Calculates the Within-Class Scatter matrix Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- within_class_scatter_matrix : array-like, shape (n, n)
f1680:m1
def calculate_between_class_scatter_matrix(X, y):
mean_vectors = calculate_mean_vectors(X, y)<EOL>n_features = X.shape[<NUM_LIT:1>]<EOL>Sb = np.zeros((n_features, n_features))<EOL>m = np.mean(X, axis=<NUM_LIT:0>).reshape(n_features, <NUM_LIT:1>)<EOL>for cl, m_i in zip(np.unique(y), mean_vectors):<EOL><INDENT>v = m_i.reshape(n_features, <NUM_LIT:1>) - m<EOL>Sb += X[y == cl, :].shape[<NUM_LIT:0>] * v @ v.T<EOL><DEDENT>return Sb<EOL>
Calculates the Between-Class Scatter matrix Parameters: ----------- X : array-like, shape (m, n) - the samples y : array-like, shape (m, ) - the class labels Returns: -------- between_class_scatter_matrix : array-like, shape (n, n)
f1680:m2
def calculate_covariance_matrix(X):
n_features = X.shape[<NUM_LIT:1>] <EOL>S = np.zeros((n_features, n_features))<EOL>m = np.mean(X, axis=<NUM_LIT:0>).reshape(n_features, <NUM_LIT:1>)<EOL>for x in X:<EOL><INDENT>v = x.reshape(n_features, <NUM_LIT:1>) - m<EOL>S += v @ v.T<EOL><DEDENT>return <NUM_LIT:1>/(X.shape[<NUM_LIT:0>]-<NUM_LIT:1>) * S<EOL>
Calculates the Variance-Covariance matrix Parameters: ----------- X : array-like, shape (m, n) - the data Returns: -------- variance_covariance_matrix : array-like, shape(n, n)
f1680:m3
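A quick sanity check (a sketch on toy data, assuming only numpy) that the loop above computes the same unbiased estimate as the vectorized `np.cov` with rows as samples:

import numpy as np

X = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, 5.5], [4.0, 8.0]])
m = X.mean(axis=0)
S = (X - m).T @ (X - m) / (X.shape[0] - 1)   # vectorized form of the loop above
assert np.allclose(S, np.cov(X, rowvar=False))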
def _init_random_gaussians(self, X):
n_samples = np.shape(X)[<NUM_LIT:0>]<EOL>self.priors = (<NUM_LIT:1> / self.k) * np.ones(self.k)<EOL>for _ in range(self.k):<EOL><INDENT>params = {}<EOL>params["<STR_LIT>"] = X[np.random.choice(range(n_samples))]<EOL>params["<STR_LIT>"] = calculate_covariance_matrix(X)<EOL>self.parameters.append(params)<EOL><DEDENT>
Initialize the Gaussian components randomly
f1686:c0:m1
def multivariate_gaussian(self, X, params):
n_features = np.shape(X)[<NUM_LIT:1>]<EOL>mean = params["<STR_LIT>"]<EOL>covar = params["<STR_LIT>"]<EOL>determinant = np.linalg.det(covar)<EOL>likelihoods = np.zeros(np.shape(X)[<NUM_LIT:0>])<EOL>for i, sample in enumerate(X):<EOL><INDENT>d = n_features <EOL>coeff = <NUM_LIT:1.0> / ((<NUM_LIT> * np.pi) ** (d / <NUM_LIT:2>) * np.sqrt(determinant))<EOL>exponent = np.exp(-<NUM_LIT:0.5> * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean)))<EOL>likelihoods[i] = coeff * exponent<EOL><DEDENT>return likelihoods<EOL>
Likelihood of each sample under a multivariate Gaussian with the given mean and covariance
f1686:c0:m2
def _get_likelihoods(self, X):
n_samples = np.shape(X)[<NUM_LIT:0>]<EOL>likelihoods = np.zeros((n_samples, self.k))<EOL>for i in range(self.k):<EOL><INDENT>likelihoods[:, i] = self.multivariate_gaussian(X, self.parameters[i])<EOL><DEDENT>return likelihoods<EOL>
Calculate the likelihood over all samples
f1686:c0:m3
def _expectation(self, X):
<EOL>weighted_likelihoods = self._get_likelihoods(X) * self.priors<EOL>sum_likelihoods = np.expand_dims(np.sum(weighted_likelihoods, axis=<NUM_LIT:1>), axis=<NUM_LIT:1>)<EOL>self.responsibility = weighted_likelihoods / sum_likelihoods<EOL>self.sample_assignments = self.responsibility.argmax(axis=<NUM_LIT:1>)<EOL>self.responsibilities.append(np.max(self.responsibility, axis=<NUM_LIT:1>))<EOL>
Calculate the responsibility
f1686:c0:m4
def _maximization(self, X):
<EOL>for i in range(self.k):<EOL><INDENT>resp = np.expand_dims(self.responsibility[:, i], axis=<NUM_LIT:1>)<EOL>mean = (resp * X).sum(axis=<NUM_LIT:0>) / resp.sum()<EOL>covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()<EOL>self.parameters[i]["<STR_LIT>"], self.parameters[i]["<STR_LIT>"] = mean, covariance<EOL><DEDENT>n_samples = np.shape(X)[<NUM_LIT:0>]<EOL>self.priors = self.responsibility.sum(axis=<NUM_LIT:0>) / n_samples<EOL>
Update the parameters and priors
f1686:c0:m5
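A toy, self-contained illustration of one expectation/maximization round on two well-separated blobs; it mirrors the E-step and M-step above but uses `scipy.stats.multivariate_normal` for the density instead of the hand-rolled pdf (variable names are hypothetical):

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
means = [X[0], X[-1]]                      # crude random-sample initialization
covs = [np.cov(X, rowvar=False)] * 2
priors = np.array([0.5, 0.5])

# E-step: responsibilities of each component for each sample
lik = np.column_stack([multivariate_normal.pdf(X, mean=m, cov=c)
                       for m, c in zip(means, covs)])
resp = lik * priors
resp /= resp.sum(axis=1, keepdims=True)

# M-step: re-estimate means, covariances and priors from the responsibilities
for k in range(2):
    r = resp[:, [k]]
    means[k] = (r * X).sum(axis=0) / r.sum()
    covs[k] = (X - means[k]).T @ ((X - means[k]) * r) / r.sum()
priors = resp.mean(axis=0)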
def _converged(self, X):
if len(self.responsibilities) < <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>diff = np.linalg.norm(self.responsibilities[-<NUM_LIT:1>] - self.responsibilities[-<NUM_LIT:2>])<EOL>return diff <= self.tolerance<EOL>
Convergence if || likelihood - last_likelihood || < tolerance
f1686:c0:m6
def cluster(self, X):
<EOL>self._init_random_gaussians(X)<EOL>for _ in range(self.max_iterations):<EOL><INDENT>self._expectation(X) <EOL>self._maximization(X) <EOL>if self._converged(X):<EOL><INDENT>break<EOL><DEDENT><DEDENT>self._expectation(X)<EOL>return self.sample_assignments<EOL>
Run GMM and return the cluster indices
f1686:c0:m7
def _calc_distortion(self):
m = self._X.shape[<NUM_LIT:0>]<EOL>self.distortion = <NUM_LIT:1>/m * sum(<EOL>linalg.norm(self._X[i, :] - self.centroids[self.clusters[i]])**<NUM_LIT:2> for i in range(m)<EOL>)<EOL>return self.distortion<EOL>
Calculates the distortion value of the current clusters
f1689:c0:m1
def _init_random_centroids(self):
self.centroids = self._X[np.random.choice(list(range(self._X.shape[<NUM_LIT:0>])), size=self.n_clusters), :]<EOL>
Initialize the centroids as k random samples of X (k = n_clusters)
f1689:c0:m2
def _move_centroids(self):
k = <NUM_LIT:0><EOL>while k < self.n_clusters:<EOL><INDENT>if k in self.clusters:<EOL><INDENT>centroid = np.mean(self._X[self.clusters == k, :], axis=<NUM_LIT:0>)<EOL>self.centroids[k] = centroid<EOL>k += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self.n_clusters -= <NUM_LIT:1><EOL>self.centroids = np.delete(self.centroids, k, axis=<NUM_LIT:0>)<EOL>self.clusters[self.clusters > k] -= <NUM_LIT:1><EOL><DEDENT><DEDENT>
Calculate new centroids as the means of the samples in each cluster
f1689:c0:m3
def _closest_centroid(self, x):
closest_centroid = <NUM_LIT:0><EOL>distance = <NUM_LIT:10>**<NUM_LIT:9><EOL>for i in range(self.n_clusters):<EOL><INDENT>current_distance = linalg.norm(x - self.centroids[i])<EOL>if current_distance < distance:<EOL><INDENT>closest_centroid = i<EOL>distance = current_distance<EOL><DEDENT><DEDENT>return closest_centroid<EOL>
Returns the index of the closest centroid to the sample
f1689:c0:m4
def _assign_clusters(self):
self.clusters = np.array([self._closest_centroid(x) for x in self._X])<EOL>
Assign the samples to the closest centroids to create clusters
f1689:c0:m5
def fit(self, X):
self._X = super().cluster(X)<EOL>candidates = []<EOL>for _ in range(self.n_runs):<EOL><INDENT>self._init_random_centroids()<EOL>while True:<EOL><INDENT>prev_clusters = self.clusters<EOL>self._assign_clusters()<EOL>self._move_centroids()<EOL>if np.all(prev_clusters == self.clusters):<EOL><INDENT>break<EOL><DEDENT><DEDENT>self._calc_distortion()<EOL>candidates.append((self.distortion, self.centroids, self.clusters))<EOL><DEDENT>candidates.sort(key=lambda x: x[<NUM_LIT:0>])<EOL>self.distortion = candidates[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>self.centroids = candidates[<NUM_LIT:0>][<NUM_LIT:1>]<EOL>self.clusters = candidates[<NUM_LIT:0>][<NUM_LIT:2>]<EOL>return self<EOL>
The K-Means itself
f1689:c0:m6
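For reference, a compact self-contained assign/move loop on toy data (names hypothetical), showing the same cycle the class above iterates until the cluster assignments stop changing:

import numpy as np

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0, 0.5, (30, 2)), rng.normal(4, 0.5, (30, 2))])
k = 2
centroids = X[rng.choice(len(X), size=k, replace=False)]
for _ in range(100):
    # assign every sample to its nearest centroid ...
    clusters = np.argmin(np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2), axis=1)
    # ... then move each centroid to the mean of its samples
    new_centroids = np.array([X[clusters == j].mean(axis=0) for j in range(k)])
    if np.allclose(new_centroids, centroids):
        break
    centroids = new_centroids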
def _compute_std_dev(self, X):
self._sigma = []<EOL>if X.shape[<NUM_LIT:0>] <= <NUM_LIT:1>:<EOL><INDENT>self._sigma = [<NUM_LIT:0.0>]<EOL><DEDENT>else:<EOL><INDENT>for i in range(X.shape[<NUM_LIT:0>]):<EOL><INDENT>x_mean = X[i]<EOL>std_dev = np.sqrt(sum([np.linalg.norm(x - x_mean) ** <NUM_LIT:2> for x in X]) / float(X.shape[<NUM_LIT:0>]-<NUM_LIT:1>))<EOL>self._sigma.append(std_dev)<EOL><DEDENT><DEDENT>return self._sigma<EOL>
Computes the standard deviation of a Gaussian Distribution with mean vector X[i]
f1690:c0:m1
def _high_dim_sim(self, v, w, normalize=False, X=None, idx=<NUM_LIT:0>):
sim = np.exp((-np.linalg.norm(v - w) ** <NUM_LIT:2>) / (<NUM_LIT:2>*self._sigma[idx] ** <NUM_LIT:2>))<EOL>if normalize:<EOL><INDENT>return sim / sum(map(lambda x: x[<NUM_LIT:1>], self._knn(idx, X, high_dim=True)))<EOL><DEDENT>else:<EOL><INDENT>return sim<EOL><DEDENT>
Similarity measurement based on Gaussian Distribution
f1690:c0:m2
def _low_dim_sim(self, v, w, normalize=False, Y=None, idx=<NUM_LIT:0>):
sim = (<NUM_LIT:1> + np.linalg.norm(v - w) ** <NUM_LIT:2>) ** -<NUM_LIT:1><EOL>if normalize:<EOL><INDENT>return sim / sum(map(lambda x: x[<NUM_LIT:1>], self._knn(idx, Y, high_dim=False)))<EOL><DEDENT>else:<EOL><INDENT>return sim<EOL><DEDENT>
Similarity measurement based on (Student) t-Distribution
f1690:c0:m3
def _knn(self, i, X, high_dim=True):
knns = []<EOL>for j in range(X.shape[<NUM_LIT:0>]):<EOL><INDENT>if j != i:<EOL><INDENT>if high_dim:<EOL><INDENT>distance = self._high_dim_sim(X[i], X[j], idx=i)<EOL><DEDENT>else:<EOL><INDENT>distance = self._low_dim_sim(X[i], X[j])<EOL><DEDENT>knns.append([j, distance])<EOL><DEDENT><DEDENT>return sorted(knns, key=lambda x: x[<NUM_LIT:1>])[:self.perplexity]<EOL>
Performs KNN search based on high/low-dimensional similarity/distance measurement
f1690:c0:m4
def fit(self, X):
<EOL>self._compute_std_dev(X)<EOL>kl_cost = KL_Divergence()<EOL>high_dim_dist = self._get_high_dim_dist(X)<EOL>Y = np.random.randn(X.shape[<NUM_LIT:0>], self.n_components)<EOL>prev_Ys = [Y, Y]<EOL>for iteration in range(<NUM_LIT:1>, self.n_iter+<NUM_LIT:1>):<EOL><INDENT>low_dim_dist = self._get_low_dim_dist(Y)<EOL>for i in range(Y.shape[<NUM_LIT:0>]):<EOL><INDENT>grad = kl_cost.gradient(high_dim_dist, low_dim_dist, Y, i)<EOL>Y[i] = prev_Ys[<NUM_LIT:1>][i] + self.learning_rate * grad + self.momentum * (prev_Ys[<NUM_LIT:1>][i] - prev_Ys[<NUM_LIT:0>][i])<EOL><DEDENT>prev_Ys = [prev_Ys[<NUM_LIT:1>], Y]<EOL>if iteration % <NUM_LIT:100> == <NUM_LIT:0> and self.verbose:<EOL><INDENT>low_dim_dist = self._get_low_dim_dist(Y)<EOL>print(f"<STR_LIT>")<EOL><DEDENT><DEDENT>self.embeddings = Y<EOL>return self<EOL>
Gradient Descent optimization process Tunes the embeddings (Y) so that their pairwise distance distribution matches the input high-dimensional data (X) pairwise distance distribution. In other words, minimizes the KL divergence cost.
f1690:c0:m7
def transform(self, X):
return self.embeddings<EOL>
Returns the learned low-dimensional embeddings of the high-dimensional data
f1690:c0:m8
def accuracy_score(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>return np.count_nonzero(y == y_pred) / y.size<EOL>
Calculates the fraction of the correctly classified samples over all. Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- accuracy : float number, the fraction of the correctly classified samples over all
f1696:m0
def true_positives(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>assert_binary_problem(y)<EOL>return np.count_nonzero(y_pred[y == <NUM_LIT:1>] == <NUM_LIT:1>)<EOL>
True-positives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- tp : integer, the number of true-positives
f1696:m1
def false_positives(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>assert_binary_problem(y)<EOL>return np.count_nonzero(y_pred[y == <NUM_LIT:0>] == <NUM_LIT:1>)<EOL>
False-positives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- fp : integer, the number of false-positives
f1696:m2
def true_negatives(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>assert_binary_problem(y)<EOL>return np.count_nonzero(y_pred[y == <NUM_LIT:0>] == <NUM_LIT:0>)<EOL>
True-negatives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- tn : integer, the number of true-negatives
f1696:m3
def false_negatives(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>assert_binary_problem(y)<EOL>return np.count_nonzero(y_pred[y == <NUM_LIT:1>] == <NUM_LIT:0>)<EOL>
False-negatives Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- fn : integer, the number of false-negatives
f1696:m4
def precision(y, y_pred):
tp = true_positives(y, y_pred)<EOL>fp = false_positives(y, y_pred)<EOL>return tp / (tp + fp)<EOL>
Precision score precision = true_positives / (true_positives + false_positives) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- precision : float
f1696:m5
def recall(y, y_pred):
tp = true_positives(y, y_pred)<EOL>fn = false_negatives(y, y_pred)<EOL>return tp / (tp + fn)<EOL>
Recall score recall = true_positives / (true_positives + false_negatives) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- recall : float
f1696:m6
def f1_score(y, y_pred):
p = precision(y, y_pred)<EOL>r = recall(y, y_pred)<EOL>return <NUM_LIT:2>*p*r / (p+r)<EOL>
F1 score f1_score = 2 * precision*recall / (precision + recall) Parameters: ----------- y : vector, shape (n_samples,) The target labels. y_pred : vector, shape (n_samples,) The predicted labels. Returns: -------- f1_score : float
f1696:m7
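A hand-checked toy example of the confusion-matrix counts and derived scores above (self-contained; `convert_assert` is assumed to only coerce the inputs to arrays):

import numpy as np

y      = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0, 1, 1, 0])
tp = np.count_nonzero(y_pred[y == 1] == 1)   # 3
fp = np.count_nonzero(y_pred[y == 0] == 1)   # 1
fn = np.count_nonzero(y_pred[y == 1] == 0)   # 1
precision = tp / (tp + fp)                   # 0.75
recall    = tp / (tp + fn)                   # 0.75
f1        = 2 * precision * recall / (precision + recall)   # 0.75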
def squared_error(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>return np.sum((y - y_pred) ** <NUM_LIT:2>)<EOL>
Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction
f1697:m0
def mean_squared_error(y, y_pred):
return squared_error(y, y_pred) / len(y)<EOL>
Calculates the mean squared difference between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the mean squared difference between target and prediction
f1697:m1
def absolute_error(y, y_pred):
y, y_pred = convert_assert(y, y_pred)<EOL>return np.sum(np.abs(y - y_pred))<EOL>
Calculates the sum of the absolute differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the absolute differences between target and prediction
f1697:m2
def mean_absolute_error(y, y_pred):
return absolute_error(y, y_pred) / len(y)<EOL>
Calculates the mean absolute difference between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the mean absolute difference between target and prediction
f1697:m3
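A similar hand-checked toy example for the regression metrics (a sketch assuming only numpy):

import numpy as np

y, y_pred = np.array([3.0, -0.5, 2.0]), np.array([2.5, 0.0, 2.0])
mse = np.mean((y - y_pred) ** 2)       # (0.25 + 0.25 + 0.0) / 3 ≈ 0.167
mae = np.mean(np.abs(y - y_pred))      # (0.5 + 0.5 + 0.0) / 3 ≈ 0.333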
def plot_decision_boundary(model, X, y, step=<NUM_LIT:0.1>, figsize=(<NUM_LIT:10>, <NUM_LIT:8>), alpha=<NUM_LIT>, size=<NUM_LIT:20>):
x_min, x_max = X[:, <NUM_LIT:0>].min() - <NUM_LIT:1>, X[:, <NUM_LIT:0>].max() + <NUM_LIT:1><EOL>y_min, y_max = X[:, <NUM_LIT:1>].min() - <NUM_LIT:1>, X[:, <NUM_LIT:1>].max() + <NUM_LIT:1><EOL>xx, yy = np.meshgrid(np.arange(x_min, x_max, step),<EOL>np.arange(y_min, y_max, step))<EOL>f, ax = plt.subplots(figsize=figsize)<EOL>Z = model.predict(np.c_[xx.ravel(), yy.ravel()])<EOL>Z = Z.reshape(xx.shape)<EOL>ax.contourf(xx, yy, Z, alpha=alpha)<EOL>ax.scatter(X[:, <NUM_LIT:0>], X[:, <NUM_LIT:1>], c=y, s=size, edgecolor='<STR_LIT:k>')<EOL>plt.show()<EOL>
Plots the classification decision boundary of `model` on `X` with labels `y`. Using numpy and matplotlib.
f1705:m0
def visualize(self):
print_tree(self.root)<EOL>
Decision Tree visualization.
f1706:c0:m6
def visualize(self):
print_tree(self.root)<EOL>
Decision Tree visualization.
f1707:c0:m6
def visualize(self):
print_tree(self.root)<EOL>
Decision Tree visualization.
f1708:c0:m6
def prop(x, s):
return list(s).count(x)/len(s)<EOL>
Returns the proportion of `x` in `s`.
f1709:m0
def gini_impurity(s):
return <NUM_LIT:1> - sum(prop(x, s)**<NUM_LIT:2> for x in set(s))<EOL>
Calculate the Gini Impurity for a list of samples. See: https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
f1709:m1
def entropy(s):
return -sum(<EOL>prop(x, s)*np.log(prop(x, s)) for x in set(s)<EOL>)<EOL>
Calculate the Entropy Impurity for a list of samples.
f1709:m2
def info_gain(current_impurity, true_branch, false_branch, criterion):
measure_impurity = gini_impurity if criterion == "<STR_LIT>" else entropy<EOL>p = float(len(true_branch)) / (len(true_branch) + len(false_branch))<EOL>return current_impurity - p * measure_impurity(true_branch) - (<NUM_LIT:1> - p) * measure_impurity(false_branch)<EOL>
Information Gain. The uncertainty of the starting node, minus the weighted impurity of two child nodes.
f1709:m3
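A small worked example of this information gain, with a self-contained Gini impurity over the unique labels (toy labels chosen by hand):

def gini(labels):
    n = len(labels)
    return 1 - sum((labels.count(c) / n) ** 2 for c in set(labels))

parent       = ['a', 'a', 'a', 'b', 'b', 'b']   # impurity 0.5
true_branch  = ['a', 'a', 'a', 'b']             # impurity 0.375
false_branch = ['b', 'b']                       # impurity 0.0
p = len(true_branch) / (len(true_branch) + len(false_branch))
gain = gini(parent) - p * gini(true_branch) - (1 - p) * gini(false_branch)   # 0.25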
@staticmethod<EOL><INDENT>def is_numeric(value):<DEDENT>
return type(value) in [<EOL>int,<EOL>float,<EOL>np.int8,<EOL>np.int16,<EOL>np.int32,<EOL>np.int64,<EOL>np.float16,<EOL>np.float32,<EOL>np.float64,<EOL>np.float128<EOL>]<EOL>
Test if a value is numeric.
f1710:c0:m0
def split(X, Y, question):
true_X, false_X = [], []<EOL>true_Y, false_Y = [], []<EOL>for x, y in zip(X, Y):<EOL><INDENT>if question.match(x):<EOL><INDENT>true_X.append(x)<EOL>true_Y.append(y)<EOL><DEDENT>else:<EOL><INDENT>false_X.append(x)<EOL>false_Y.append(y)<EOL><DEDENT><DEDENT>return (np.array(true_X), np.array(false_X),<EOL>np.array(true_Y), np.array(false_Y))<EOL>
Partitions a dataset. For each row in the dataset, check if it matches the question. If so, add it to 'true rows', otherwise, add it to 'false rows'.
f1711:m0
def find_best_question(X, y, criterion):
measure_impurity = gini_impurity if criterion == "<STR_LIT>" else entropy<EOL>current_impurity = measure_impurity(y)<EOL>best_info_gain = <NUM_LIT:0><EOL>best_question = None<EOL>for feature_n in range(X.shape[<NUM_LIT:1>]):<EOL><INDENT>for value in set(X[:, feature_n]):<EOL><INDENT>q = Question(feature_n, value)<EOL>_, _, true_y, false_y = split(X, y, q)<EOL>current_info_gain = info_gain(current_impurity, true_y, false_y, criterion)<EOL>if current_info_gain >= best_info_gain:<EOL><INDENT>best_info_gain = current_info_gain<EOL>best_question = q<EOL><DEDENT><DEDENT><DEDENT>return best_info_gain, best_question<EOL>
Find the best question to ask by iterating over every feature / value and calculating the information gain.
f1711:m1
def build_tree(X, y, criterion, max_depth, current_depth=<NUM_LIT:1>):
<EOL>if max_depth >= <NUM_LIT:0> and current_depth >= max_depth:<EOL><INDENT>return Leaf(y)<EOL><DEDENT>gain, question = find_best_question(X, y, criterion)<EOL>if gain == <NUM_LIT:0>:<EOL><INDENT>return Leaf(y)<EOL><DEDENT>true_X, false_X, true_y, false_y = split(X, y, question)<EOL>true_branch = build_tree(<EOL>true_X, true_y,<EOL>criterion,<EOL>max_depth,<EOL>current_depth=current_depth+<NUM_LIT:1><EOL>)<EOL>false_branch = build_tree(<EOL>false_X, false_y,<EOL>criterion,<EOL>max_depth,<EOL>current_depth=current_depth+<NUM_LIT:1><EOL>)<EOL>return Node(<EOL>question=question,<EOL>true_branch=true_branch,<EOL>false_branch=false_branch<EOL>)<EOL>
Builds the decision tree.
f1711:m3
def build_extra_tree(X, y, criterion, max_depth, current_depth=<NUM_LIT:1>):
<EOL>if max_depth >= <NUM_LIT:0> and current_depth >= max_depth:<EOL><INDENT>return Leaf(y)<EOL><DEDENT>gain, question = pick_rand_question(X, y, criterion)<EOL>if gain == <NUM_LIT:0>:<EOL><INDENT>return Leaf(y)<EOL><DEDENT>true_X, false_X, true_y, false_y = split(X, y, question)<EOL>true_branch = build_extra_tree(<EOL>true_X, true_y,<EOL>criterion,<EOL>max_depth,<EOL>current_depth=current_depth+<NUM_LIT:1><EOL>)<EOL>false_branch = build_extra_tree(<EOL>false_X, false_y,<EOL>criterion,<EOL>max_depth,<EOL>current_depth=current_depth+<NUM_LIT:1><EOL>)<EOL>return Node(<EOL>question=question,<EOL>true_branch=true_branch,<EOL>false_branch=false_branch<EOL>)<EOL>
Builds the extremely randomized decision tree.
f1711:m4
def tree_predict(x, root, proba=False, regression=False):
if isinstance(root, Leaf):<EOL><INDENT>if proba:<EOL><INDENT>return root.probabilities<EOL><DEDENT>elif regression:<EOL><INDENT>return root.mean<EOL><DEDENT>else:<EOL><INDENT>return root.most_frequent<EOL><DEDENT><DEDENT>if root.question.match(x):<EOL><INDENT>return tree_predict(x, root.true_branch, proba=proba, regression=regression)<EOL><DEDENT>else:<EOL><INDENT>return tree_predict(x, root.false_branch, proba=proba, regression=regression)<EOL><DEDENT>
Predicts the probabilities/value/label for the sample x.
f1711:m5
def print_tree(root, space='<STR_LIT:U+0020>'):
if isinstance(root, Leaf):<EOL><INDENT>print(space + "<STR_LIT>" + str(root.most_frequent))<EOL>return<EOL><DEDENT>print(space + str(root.question))<EOL>print(space + "<STR_LIT>")<EOL>print_tree(root.true_branch, space+'<STR_LIT:U+0020>')<EOL>print(space + "<STR_LIT>")<EOL>print_tree(root.false_branch, space+'<STR_LIT:U+0020>')<EOL>
Prints the Decision Tree in a pretty way.
f1711:m6
def visualize(self):
print_tree(self.root)<EOL>
Decision Tree visualization.
f1714:c0:m6
def get_params(self, *keys):
if len(keys) == <NUM_LIT:0>:<EOL><INDENT>return vars(self)<EOL><DEDENT>else:<EOL><INDENT>return [vars(self)[k] for k in keys]<EOL><DEDENT>
Returns the specified parameters for the current model. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
f1715:c0:m3
def set_params(self, **params):
for k, v in params.items():<EOL><INDENT>vars(self)[k] = v<EOL><DEDENT>
Sets new values to the specified parameters. Parameters: ----------- params : variable sized dictionary, n key-word arguments Example: ``` model.set_params(C=0.34, kernel="rbf") ``` Returns: -------- void : void, returns nothing
f1715:c0:m4
def fit(self, X, y):
X = np.array(X, dtype=np.float32)<EOL>y = np.array(y, dtype=np.float32)<EOL>assert X.shape[<NUM_LIT:0>] == y.shape[<NUM_LIT:0>]<EOL>return X, y<EOL>
Fits the given model to the data and labels provided. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. Returns: -------- self : instance of the model itself (`self`)
f1715:c1:m0
def predict(self, X):
return np.array(X, dtype=np.float32)<EOL>
Predicts the labels of the given data. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples. Returns: -------- y : vector, shape (n_samples,) The predicted labels.
f1715:c1:m1
def predict_proba(self, X):
return np.array(X, dtype=np.float32)<EOL>
Probability prediction of the given data. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples. Returns: -------- y : vector, shape (n_samples,) The predicted probabilities.
f1715:c1:m2
def decision_function(self, X):
return np.array(X, dtype=np.float32)<EOL>
Applies only the hypothesis (decision function) to the given data. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples. Returns: -------- y : vector, shape (n_samples,) The non shrank, raw values acquired from the outputs of the hypothesis. No shrinking, probability functions is applied.
f1715:c1:m3
def fit_predict(self, X, y, X_):
self.fit(X, y)<EOL>return self.predict(X_)<EOL>
Shortcut to `model.fit(X, y); return model.predict(X_)`. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. X_ : matrix, shape (m_samples, m_features) The samples which labels to predict. Returns: -------- y : vector, shape (m_samples,) The predicted labels.
f1715:c1:m4
def evaluate(self, X, y):
X = np.array(X, dtype=np.float32)<EOL>y = np.array(y, dtype=np.float32)<EOL>assert X.shape[<NUM_LIT:0>] == y.shape[<NUM_LIT:0>]<EOL>return X, y<EOL>
Error/Accuracy evaluation of the model. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples. y : vector, shape (n_samples,) The target labels. Returns: -------- nothing : void, prints a summary of the evaluation
f1715:c1:m5
def fit(self, X):
return np.array(X, dtype=np.float32)<EOL>
Fits the given model to the data. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. Returns: -------- self : instance of the model itself (`self`)
f1715:c2:m0
def get_params(self, *keys):
if len(keys) == <NUM_LIT:0>:<EOL><INDENT>return vars(self)<EOL><DEDENT>else:<EOL><INDENT>return [vars(self)[k] for k in keys]<EOL><DEDENT>
Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
f1716:c0:m3
def set_params(self, **params):
for k, v in params.items():<EOL><INDENT>vars(self)[k] = v<EOL><DEDENT>
Sets new values to the specified parameters. Parameters: ----------- params : variable sized dictionary, n key-word arguments Example: ``` scaler.set_params(std=0.30) ``` Returns: -------- void : void, returns nothing
f1716:c0:m4
def fit(self, X, y=None):
if y is None:<EOL><INDENT>return np.array(X, dtype=np.float32)<EOL><DEDENT>else:<EOL><INDENT>X, y = (np.array(X, dtype=np.float32), <EOL>np.array(y, dtype=np.float32))<EOL>assert X.shape[<NUM_LIT:0>] == y.shape[<NUM_LIT:0>]<EOL>return X, y<EOL><DEDENT>
Passing the data to transform to the data-preprocessor. Parameters: ----------- X : array-like data y : the labels, optional
f1716:c0:m5
def transform(self, X):
return np.array(X, dtype=np.float32)<EOL>
Transforms the data given. Parameters: ----------- X : array-like data
f1716:c0:m6
def fit_transform(self, X, y=None):
if y is None:<EOL><INDENT>self.fit(X)<EOL><DEDENT>else:<EOL><INDENT>self.fit(X, y)<EOL><DEDENT>return self.transform(X)<EOL>
Fit-Transform shortcut function. Parameters: ----------- X : array-like data y : the labels, optional
f1716:c0:m7
def cluster(self, X):
return np.array(X, dtype=np.float32)<EOL>
Clustering - assigns clusters to the samples Parameters: ----------- X : array-like, shape (m, n), the samples Returns: -------- clusters : array-like, shape (m, ), the assigned clusters
f1718:c0:m0
def init_weights(self):
self.W = np.random.randn(self.n_neurons, self.n_inputs) * np.sqrt(<NUM_LIT:2> / self.n_inputs)<EOL>self.b = np.zeros((self.n_neurons, <NUM_LIT:1>))<EOL>
Performs He initialization
f1730:c1:m3
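A minimal numeric check (a sketch assuming only numpy) of why the weights are scaled by sqrt(2 / n_inputs): for standard-normal inputs the pre-activation variance stays near 2, which a ReLU then roughly halves back towards 1:

import numpy as np

n_inputs, n_neurons = 512, 256
rng = np.random.default_rng(0)
W = rng.standard_normal((n_neurons, n_inputs)) * np.sqrt(2.0 / n_inputs)   # He init
x = rng.standard_normal((n_inputs, 1))
pre_act = W @ x
print(np.var(pre_act))   # close to 2.0 (= n_inputs * Var[W] * Var[x])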
def cross_validate(model, X, y, k_folds=<NUM_LIT:5>, metric="<STR_LIT>", shuffle=True):
train_scores = []<EOL>test_scores = []<EOL>folds = KFolds(X, y, k=k_folds, shuffle=shuffle)<EOL>for X_train, X_test, y_train, y_test in folds:<EOL><INDENT>model.fit(X_train, y_train)<EOL>if metric is None or metric == "<STR_LIT>":<EOL><INDENT>train_scores.append(model.evaluate(X_train, y_train))<EOL>test_scores.append(model.evaluate(X_test, y_test))<EOL><DEDENT>else:<EOL><INDENT>train_scores.append(<EOL>metric(y_train, model.predict(X_train))<EOL>)<EOL>test_scores.append(<EOL>metric(y_test, model.predict(X_test))<EOL>)<EOL><DEDENT><DEDENT>return {<EOL>"<STR_LIT>": np.array(train_scores),<EOL>"<STR_LIT>": np.array(test_scores),<EOL>}<EOL>
Cross Validation Evaluates the given model using the given data by repeatedly fitting and predicting on different chunks (folds) of the data. Parameters: ----------- model : dojo-model, the model to be evaluated X : matrix, shape (n_samples, n_features), the data used for evaluation y : vector, shape (n_samples, ), the desired labels k_folds : integer, optional, the number of iterations/folds metric : the single value error/accuracy metric, optional shuffle : boolean, whether to shuffle the data before splitting it or not Returns: -------- dict_scores : dictionary with train scores and test scores
f1736:m0
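KFolds is not shown here; a self-contained sketch of an equivalent splitter (hypothetical name) that yields the train/test index pairs the loop above consumes:

import numpy as np

def kfold_indices(n_samples, k=5, shuffle=True, seed=0):
    idx = np.arange(n_samples)
    if shuffle:
        np.random.default_rng(seed).shuffle(idx)
    for fold in np.array_split(idx, k):
        # everything outside the held-out fold is the training set
        yield np.setdiff1d(idx, fold), fold

# usage sketch:
# for train_idx, test_idx in kfold_indices(len(X), k=5):
#     model.fit(X[train_idx], y[train_idx])
#     score = metric(y[test_idx], model.predict(X[test_idx]))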
def toPyModel(model_ptr):
if bool(model_ptr) == False:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>m = model_ptr.contents<EOL>m.__createfrom__ = '<STR_LIT:C>'<EOL>return m<EOL>
toPyModel(model_ptr) -> svm_model Convert a ctypes POINTER(svm_model) to a Python svm_model
f1743:m7
def svm_read_problem(data_file_name, return_scipy=False):
prob_y = []<EOL>prob_x = []<EOL>row_ptr = [<NUM_LIT:0>]<EOL>col_idx = []<EOL>for i, line in enumerate(open(data_file_name)):<EOL><INDENT>line = line.split(None, <NUM_LIT:1>)<EOL>if len(line) == <NUM_LIT:1>: line += ['<STR_LIT>']<EOL>label, features = line<EOL>prob_y += [float(label)]<EOL>if scipy != None and return_scipy:<EOL><INDENT>nz = <NUM_LIT:0><EOL>for e in features.split():<EOL><INDENT>ind, val = e.split("<STR_LIT::>")<EOL>val = float(val)<EOL>if val != <NUM_LIT:0>:<EOL><INDENT>col_idx += [int(ind)-<NUM_LIT:1>]<EOL>prob_x += [val]<EOL>nz += <NUM_LIT:1><EOL><DEDENT><DEDENT>row_ptr += [row_ptr[-<NUM_LIT:1>]+nz]<EOL><DEDENT>else:<EOL><INDENT>xi = {}<EOL>for e in features.split():<EOL><INDENT>ind, val = e.split("<STR_LIT::>")<EOL>xi[int(ind)] = float(val)<EOL><DEDENT>prob_x += [xi]<EOL><DEDENT><DEDENT>if scipy != None and return_scipy:<EOL><INDENT>prob_y = scipy.array(prob_y)<EOL>prob_x = scipy.array(prob_x)<EOL>col_idx = scipy.array(col_idx)<EOL>row_ptr = scipy.array(row_ptr)<EOL>prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))<EOL><DEDENT>return (prob_y, prob_x)<EOL>
svm_read_problem(data_file_name, return_scipy=False) -> [y, x], y: list, x: list of dictionary svm_read_problem(data_file_name, return_scipy=True) -> [y, x], y: ndarray, x: csr_matrix Read LIBSVM-format data from data_file_name and return labels y and data instances x.
f1744:m0
def evaluations_scipy(ty, pv):
if not (scipy != None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):<EOL><INDENT>raise TypeError("<STR_LIT>")<EOL><DEDENT>if len(ty) != len(pv):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>ACC = <NUM_LIT>*(ty == pv).mean()<EOL>MSE = ((ty - pv)**<NUM_LIT:2>).mean()<EOL>l = len(ty)<EOL>sumv = pv.sum()<EOL>sumy = ty.sum()<EOL>sumvy = (pv*ty).sum()<EOL>sumvv = (pv*pv).sum()<EOL>sumyy = (ty*ty).sum()<EOL>with scipy.errstate(all = '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))<EOL><DEDENT>except:<EOL><INDENT>SCC = float('<STR_LIT>')<EOL><DEDENT><DEDENT>return (float(ACC), float(MSE), float(SCC))<EOL>
evaluations_scipy(ty, pv) -> (ACC, MSE, SCC) ty, pv: ndarray Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv).
f1744:m1
def evaluations(ty, pv, useScipy = True):
if scipy != None and useScipy:<EOL><INDENT>return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))<EOL><DEDENT>if len(ty) != len(pv):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>total_correct = total_error = <NUM_LIT:0><EOL>sumv = sumy = sumvv = sumyy = sumvy = <NUM_LIT:0><EOL>for v, y in zip(pv, ty):<EOL><INDENT>if y == v:<EOL><INDENT>total_correct += <NUM_LIT:1><EOL><DEDENT>total_error += (v-y)*(v-y)<EOL>sumv += v<EOL>sumy += y<EOL>sumvv += v*v<EOL>sumyy += y*y<EOL>sumvy += v*y<EOL><DEDENT>l = len(ty)<EOL>ACC = <NUM_LIT>*total_correct/l<EOL>MSE = total_error/l<EOL>try:<EOL><INDENT>SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))<EOL><DEDENT>except:<EOL><INDENT>SCC = float('<STR_LIT>')<EOL><DEDENT>return (float(ACC), float(MSE), float(SCC))<EOL>
evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC) ty, pv: list, tuple or ndarray useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv).
f1744:m2
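A hand-checked toy run of the three values returned by evaluations() (pure Python, no scipy path):

ty = [1.0, 2.0, 3.0, 4.0]
pv = [1.0, 2.5, 2.5, 4.0]
l = len(ty)
ACC = 100.0 * sum(t == p for t, p in zip(ty, pv)) / l   # 50.0
MSE = sum((t - p) ** 2 for t, p in zip(ty, pv)) / l     # 0.125
sumv, sumy = sum(pv), sum(ty)
sumvy = sum(p * t for p, t in zip(pv, ty))
sumvv, sumyy = sum(p * p for p in pv), sum(t * t for t in ty)
SCC = ((l * sumvy - sumv * sumy) ** 2) / ((l * sumvv - sumv ** 2) * (l * sumyy - sumy ** 2))   # 0.9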
def svm_load_model(model_file_name):
model = libsvm.svm_load_model(model_file_name.encode())<EOL>if not model:<EOL><INDENT>print("<STR_LIT>" % model_file_name)<EOL>return None<EOL><DEDENT>model = toPyModel(model)<EOL>return model<EOL>
svm_load_model(model_file_name) -> model Load a LIBSVM model from model_file_name and return.
f1745:m0
def svm_save_model(model_file_name, model):
libsvm.svm_save_model(model_file_name.encode(), model)<EOL>
svm_save_model(model_file_name, model) -> None Save a LIBSVM model to the file model_file_name.
f1745:m1
def svm_train(arg1, arg2=None, arg3=None):
prob, param = None, None<EOL>if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, scipy.ndarray)):<EOL><INDENT>assert isinstance(arg2, (list, tuple)) or (scipy and isinstance(arg2, (scipy.ndarray, sparse.spmatrix)))<EOL>y, x, options = arg1, arg2, arg3<EOL>param = svm_parameter(options)<EOL>prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))<EOL><DEDENT>elif isinstance(arg1, svm_problem):<EOL><INDENT>prob = arg1<EOL>if isinstance(arg2, svm_parameter):<EOL><INDENT>param = arg2<EOL><DEDENT>else:<EOL><INDENT>param = svm_parameter(arg2)<EOL><DEDENT><DEDENT>if prob == None or param == None:<EOL><INDENT>raise TypeError("<STR_LIT>")<EOL><DEDENT>if param.kernel_type == PRECOMPUTED:<EOL><INDENT>for i in range(prob.l):<EOL><INDENT>xi = prob.x[i]<EOL>idx, val = xi[<NUM_LIT:0>].index, xi[<NUM_LIT:0>].value<EOL>if idx != <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if val <= <NUM_LIT:0> or val > prob.n:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if param.gamma == <NUM_LIT:0> and prob.n > <NUM_LIT:0>:<EOL><INDENT>param.gamma = <NUM_LIT:1.0> / prob.n<EOL><DEDENT>libsvm.svm_set_print_string_function(param.print_func)<EOL>err_msg = libsvm.svm_check_parameter(prob, param)<EOL>if err_msg:<EOL><INDENT>raise ValueError('<STR_LIT>' % err_msg)<EOL><DEDENT>if param.cross_validation:<EOL><INDENT>l, nr_fold = prob.l, param.nr_fold<EOL>target = (c_double * l)()<EOL>libsvm.svm_cross_validation(prob, param, nr_fold, target)<EOL>ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])<EOL>if param.svm_type in [EPSILON_SVR, NU_SVR]:<EOL><INDENT>print("<STR_LIT>" % MSE)<EOL>print("<STR_LIT>" % SCC)<EOL>return MSE<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>" % ACC)<EOL>return ACC<EOL><DEDENT><DEDENT>else:<EOL><INDENT>m = libsvm.svm_train(prob, param)<EOL>m = toPyModel(m)<EOL>m.x_space = prob.x_space<EOL>return m<EOL><DEDENT>
svm_train(y, x [, options]) -> model | ACC | MSE y: a list/tuple/ndarray of l true labels (type must be int/double). x: 1. a list/tuple of l training instances. Feature vector of each training instance is a list/tuple or dictionary. 2. an l * n numpy ndarray or scipy spmatrix (n: number of features). svm_train(prob [, options]) -> model | ACC | MSE svm_train(prob, param) -> model | ACC| MSE Train an SVM model from data (y, x) or an svm_problem prob using 'options' or an svm_parameter param. If '-v' is specified in 'options' (i.e., cross validation) either accuracy (ACC) or mean-squared error (MSE) is returned. options: -s svm_type : set type of SVM (default 0) 0 -- C-SVC (multi-class classification) 1 -- nu-SVC (multi-class classification) 2 -- one-class SVM 3 -- epsilon-SVR (regression) 4 -- nu-SVR (regression) -t kernel_type : set type of kernel function (default 2) 0 -- linear: u'*v 1 -- polynomial: (gamma*u'*v + coef0)^degree 2 -- radial basis function: exp(-gamma*|u-v|^2) 3 -- sigmoid: tanh(gamma*u'*v + coef0) 4 -- precomputed kernel (kernel values in training_set_file) -d degree : set degree in kernel function (default 3) -g gamma : set gamma in kernel function (default 1/num_features) -r coef0 : set coef0 in kernel function (default 0) -c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1) -n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5) -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1) -m cachesize : set cache memory size in MB (default 100) -e epsilon : set tolerance of termination criterion (default 0.001) -h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1) -b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0) -wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1) -v n: n-fold cross validation mode -q : quiet mode (no outputs)
f1745:m2
def svm_predict(y, x, m, options="<STR_LIT>"):
def info(s):<EOL><INDENT>print(s)<EOL><DEDENT>if scipy and isinstance(x, scipy.ndarray):<EOL><INDENT>x = scipy.ascontiguousarray(x) <EOL><DEDENT>elif sparse and isinstance(x, sparse.spmatrix):<EOL><INDENT>x = x.tocsr()<EOL><DEDENT>elif not isinstance(x, (list, tuple)):<EOL><INDENT>raise TypeError("<STR_LIT>".format(type(x)))<EOL><DEDENT>if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, scipy.ndarray))):<EOL><INDENT>raise TypeError("<STR_LIT>".format(type(y)))<EOL><DEDENT>predict_probability = <NUM_LIT:0><EOL>argv = options.split()<EOL>i = <NUM_LIT:0><EOL>while i < len(argv):<EOL><INDENT>if argv[i] == '<STR_LIT>':<EOL><INDENT>i += <NUM_LIT:1><EOL>predict_probability = int(argv[i])<EOL><DEDENT>elif argv[i] == '<STR_LIT>':<EOL><INDENT>info = print_null<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>i+=<NUM_LIT:1><EOL><DEDENT>svm_type = m.get_svm_type()<EOL>is_prob_model = m.is_probability_model()<EOL>nr_class = m.get_nr_class()<EOL>pred_labels = []<EOL>pred_values = []<EOL>if scipy and isinstance(x, sparse.spmatrix):<EOL><INDENT>nr_instance = x.shape[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>nr_instance = len(x)<EOL><DEDENT>if predict_probability:<EOL><INDENT>if not is_prob_model:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if svm_type in [NU_SVR, EPSILON_SVR]:<EOL><INDENT>info("<STR_LIT>"<EOL>"<STR_LIT>" % m.get_svr_probability());<EOL>nr_class = <NUM_LIT:0><EOL><DEDENT>prob_estimates = (c_double * nr_class)()<EOL>for i in range(nr_instance):<EOL><INDENT>if scipy and isinstance(x, sparse.spmatrix):<EOL><INDENT>indslice = slice(x.indptr[i], x.indptr[i+<NUM_LIT:1>])<EOL>xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))<EOL><DEDENT>else:<EOL><INDENT>xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))<EOL><DEDENT>label = libsvm.svm_predict_probability(m, xi, prob_estimates)<EOL>values = prob_estimates[:nr_class]<EOL>pred_labels += [label]<EOL>pred_values += [values]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if is_prob_model:<EOL><INDENT>info("<STR_LIT>")<EOL><DEDENT>if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):<EOL><INDENT>nr_classifier = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>nr_classifier = nr_class*(nr_class-<NUM_LIT:1>)//<NUM_LIT:2><EOL><DEDENT>dec_values = (c_double * nr_classifier)()<EOL>for i in range(nr_instance):<EOL><INDENT>if scipy and isinstance(x, sparse.spmatrix):<EOL><INDENT>indslice = slice(x.indptr[i], x.indptr[i+<NUM_LIT:1>])<EOL>xi, idx = gen_svm_nodearray((x.indices[indslice], x.data[indslice]), isKernel=(m.param.kernel_type == PRECOMPUTED))<EOL><DEDENT>else:<EOL><INDENT>xi, idx = gen_svm_nodearray(x[i], isKernel=(m.param.kernel_type == PRECOMPUTED))<EOL><DEDENT>label = libsvm.svm_predict_values(m, xi, dec_values)<EOL>if(nr_class == <NUM_LIT:1>):<EOL><INDENT>values = [<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>values = dec_values[:nr_classifier]<EOL><DEDENT>pred_labels += [label]<EOL>pred_values += [values]<EOL><DEDENT><DEDENT>if len(y) == <NUM_LIT:0>:<EOL><INDENT>y = [<NUM_LIT:0>] * nr_instance<EOL><DEDENT>ACC, MSE, SCC = evaluations(y, pred_labels)<EOL>if svm_type in [EPSILON_SVR, NU_SVR]:<EOL><INDENT>info("<STR_LIT>" % MSE)<EOL>info("<STR_LIT>" % SCC)<EOL><DEDENT>else:<EOL><INDENT>info("<STR_LIT>" % (ACC, int(round(nr_instance*ACC/<NUM_LIT:100>)), nr_instance))<EOL><DEDENT>return pred_labels, (ACC, MSE, SCC), pred_values<EOL>
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals) y: a list/tuple/ndarray of l true labels (type must be int/double). It is used for calculating the accuracy. Use [] if true labels are unavailable. x: 1. a list/tuple of l training instances. Feature vector of each training instance is a list/tuple or dictionary. 2. an l * n numpy ndarray or scipy spmatrix (n: number of features). Predict data (y, x) with the SVM model m. options: -b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); for one-class SVM only 0 is supported. -q : quiet mode (no outputs). The return tuple contains p_labels: a list of predicted labels p_acc: a tuple including accuracy (for classification), mean-squared error, and squared correlation coefficient (for regression). p_vals: a list of decision values or probability estimates (if '-b 1' is specified). If k is the number of classes, for decision values, each element includes results of predicting k(k-1)/2 binary-class SVMs. For probabilities, each element contains k values indicating the probability that the testing instance is in each class. Note that the order of classes here is the same as 'model.label' field in the model structure.
f1745:m3
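A minimal usage sketch of the interface above, mirroring the usual LIBSVM Python workflow; the import path and the data file name are assumptions, and the '-c'/'-t' flags are the options documented in svm_train:

from svmutil import svm_read_problem, svm_train, svm_predict   # assumed module name

y, x = svm_read_problem('heart_scale')          # hypothetical LIBSVM-format file
m = svm_train(y[:200], x[:200], '-c 4 -t 2')    # C-SVC with an RBF kernel
p_labels, p_acc, p_vals = svm_predict(y[200:], x[200:], m)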
def gradient(self, P, Q, Y, i):
return <NUM_LIT:4> * sum([<EOL>(P[i, j] - Q[i, j]) * (Y[i] - Y[j]) * (<NUM_LIT:1> + np.linalg.norm(Y[i] - Y[j]) ** <NUM_LIT:2>) ** -<NUM_LIT:1> for j in range(Y.shape[<NUM_LIT:0>])<EOL>])<EOL>
Computes the gradient of KL divergence with respect to the i'th example of Y
f1751:c3:m2
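A vectorized, self-contained form of the same gradient term (assuming P and Q are the pairwise affinity matrices and Y holds the embeddings row-wise); the j == i term contributes zero, so the full sum can be taken:

import numpy as np

def kl_gradient(P, Q, Y, i):
    diff = Y[i] - Y                                   # (n, d) rows of Y[i] - Y[j]
    inv = 1.0 / (1.0 + np.sum(diff ** 2, axis=1))     # (n,) Student-t kernel terms
    return 4.0 * ((P[i] - Q[i]) * inv) @ diff         # (d,) gradient w.r.t. Y[i]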
def __init__(self, key, email, scopes, subject=None):
self._key = None<EOL>self._email = None<EOL>self._scopes = None<EOL>self._subject = None<EOL>self._issued_at = None<EOL>self._access_token = None<EOL>self.key = key<EOL>self.email = email<EOL>self.scopes = scopes<EOL>self.subject = subject<EOL>
Constructs new instance for given service account. Although it is possible to use this, it isn't recommended. You have to parse private key yourself and make ``OpenSSL.crypto.PKey`` out of it. Because Google Developer Console generates keys in two file formats - JSON and PKCS#12, it is advised to use ``ServiceAccount.from_json`` or ``ServiceAccount.from_pkcs12``. Args: key (OpenSSL.crypto.PKey) - RSA private key used for signing JWT. email (str) - Service account email. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access.
f1761:c0:m0
@classmethod<EOL><INDENT>def from_json(cls, key, scopes, subject=None):<DEDENT>
credentials_type = key['<STR_LIT:type>']<EOL>if credentials_type != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % credentials_type)<EOL><DEDENT>email = key['<STR_LIT>']<EOL>key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,<EOL>key['<STR_LIT>'])<EOL>return cls(key=key, email=email, scopes=scopes, subject=subject)<EOL>
Alternate constructor intended for using JSON format of private key. Args: key (dict) - Parsed JSON with service account credentials. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. Returns: ServiceAccount
f1761:c0:m1
@classmethod<EOL><INDENT>def from_pkcs12(cls, key, email, scopes, subject=None,<EOL>passphrase=PKCS12_PASSPHRASE):<DEDENT>
key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()<EOL>return cls(key=key, email=email, scopes=scopes, subject=subject)<EOL>
Alternate constructor intended for using .p12 files. Args: key (bytes) - Contents of the .p12 key file. email (str) - Service account email. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. passphrase (str) - Passphrase of private key file. Google generates .p12 files secured with fixed 'notasecret' passphrase, so if you didn't change it it's fine to omit this parameter. Returns: ServiceAccount
f1761:c0:m2
@property<EOL><INDENT>def key(self):<DEDENT>
return self._key<EOL>
RSA private key used to sign JSON Web Tokens. Returns: OpenSSL.crypto.PKey
f1761:c0:m3
@property<EOL><INDENT>def scopes(self):<DEDENT>
return self._scopes.split()<EOL>
Scopes requested in the OAuth2 access token request. Although Google accepts scopes as a space delimited string, accessing this property will return a list of scopes. Returns: list[str]
f1761:c0:m7
@property<EOL><INDENT>def issued_at(self):<DEDENT>
issued_at = self._issued_at<EOL>if issued_at is None:<EOL><INDENT>self._issued_at = int(time.time())<EOL><DEDENT>return self._issued_at<EOL>
Time when the access token was requested, as seconds since epoch. Note: Accessing this property before any request attempt has been made will return the current time. Returns: int
f1761:c0:m11
@property<EOL><INDENT>def expiration_time(self):<DEDENT>
return self.issued_at + <NUM_LIT><EOL>
Expiration time of the access token, as seconds since epoch. Although it is possible to request access tokens with any expiration time of less than one hour, Google will issue the token for one hour regardless of the value sent. Returns: int
f1761:c0:m13
@property<EOL><INDENT>def access_token(self):<DEDENT>
if (self._access_token is None or<EOL>self.expiration_time <= int(time.time())):<EOL><INDENT>resp = self.make_access_request()<EOL>self._access_token = resp.json()['<STR_LIT>']<EOL><DEDENT>return self._access_token<EOL>
Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str
f1761:c0:m14
def make_access_request(self):
del self.issued_at<EOL>assertion = b'<STR_LIT:.>'.join((self.header(), self.claims(), self.signature()))<EOL>post_data = {<EOL>'<STR_LIT>': GRANT_TYPE,<EOL>'<STR_LIT>': assertion,<EOL>}<EOL>resp = requests.post(AUDIENCE, post_data)<EOL>if resp.status_code != <NUM_LIT:200>:<EOL><INDENT>raise AuthenticationError(resp)<EOL><DEDENT>return resp<EOL>
Makes an OAuth2 access token request with the crafted JWT and signature. The core of this module. Based on the arguments it creates a proper JWT for you and signs it with the supplied private key. Regardless of whether a valid token is already present, it always clears the ``issued_at`` property, which in turn results in requesting a fresh OAuth2 access token. Returns: requests.Response Raises: google_oauth.exceptions.AuthenticationError: If there was any non-200 HTTP-code from Google. requests.RequestException: Something went wrong when doing HTTP request.
f1761:c0:m18
def authorized_request(self, method, url, **kwargs):
headers = kwargs.pop('<STR_LIT>', {})<EOL>if headers.get('<STR_LIT>') or kwargs.get('<STR_LIT>'):<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>headers['<STR_LIT>'] = '<STR_LIT>' + self.access_token<EOL>return requests.request(method, url, headers=headers, **kwargs)<EOL>
Shortcut for requests.request with proper Authorization header. Note: If you put auth keyword argument or Authorization in headers keyword argument, this will raise an exception. Decide what you want to do! Args: method (str) - HTTP method of this request, like GET or POST. url (str) - URL of this request (one of Google APIs). Examples: >>> scope = 'https://www.googleapis.com/auth/plus.login' >>> url = 'https://www.googleapis.com/plus/v1/people' \ >>> '?query=Guido+van+Rossum' >>> key = json.load(open('/path/to/credentials.json')) >>> auth = ServiceAccount.from_json(key=key, scopes=scope) >>> auth.authorized_request(method='get', url=url) Returns: requests.Response
f1761:c0:m19
@staticmethod<EOL><INDENT>def to_object(item):<DEDENT>
def convert(item): <EOL><INDENT>if isinstance(item, dict):<EOL><INDENT>return IterableObject({k: convert(v) for k, v in item.items()})<EOL><DEDENT>if isinstance(item, list):<EOL><INDENT>def yield_convert(item):<EOL><INDENT>for index, value in enumerate(item):<EOL><INDENT>yield convert(value)<EOL><DEDENT><DEDENT>return list(yield_convert(item))<EOL><DEDENT>else:<EOL><INDENT>return item<EOL><DEDENT><DEDENT>return convert(item)<EOL>
Convert a dictionary to an object (recursive).
f1762:c2:m0
@staticmethod<EOL><INDENT>def to_dict(item):<DEDENT>
def convert(item):<EOL><INDENT>if isinstance(item, IterableObject):<EOL><INDENT>if isinstance(item.source, dict):<EOL><INDENT>return {k: convert(v.source) if hasattr(v, '<STR_LIT:source>') else convert(v) for k, v in item}<EOL><DEDENT>else:<EOL><INDENT>return convert(item.source)<EOL><DEDENT><DEDENT>elif isinstance(item, dict):<EOL><INDENT>return {k: convert(v) for k, v in item.items()}<EOL><DEDENT>elif isinstance(item, list):<EOL><INDENT>def yield_convert(item):<EOL><INDENT>for index, value in enumerate(item):<EOL><INDENT>yield convert(value)<EOL><DEDENT><DEDENT>return list(yield_convert(item))<EOL><DEDENT>else:<EOL><INDENT>return item<EOL><DEDENT><DEDENT>return convert(item)<EOL>
Convert an object to a dictionary (recursive).
f1762:c2:m1
def normal_user(app_id, login, password):
api = userapp.API(app_id=app_id, debug=True)<EOL>results = api.user.login(login=login, password=password)<EOL>token = results.token<EOL>user_id = results.user_id<EOL>lock_type = results.lock_type<EOL>api.get_logger().debug("<STR_LIT>".format(t=token, u=user_id, l=lock_type))<EOL>myself = api.user.get()<EOL>count = api.user.count()<EOL>api.user.logout()<EOL>
Example #1: A normal user logging in. You can tell because the API token isn't set until we get it back from user_login().
f1764:m0
def admin_user(app_id, token):
api = userapp.API(app_id=app_id, token=token, debug=True)<EOL>count = api.user.count()<EOL>results = api.user.search()<EOL>user_list = results.items<EOL>results = api.user.get(user_id = [<EOL>user_list[<NUM_LIT:0>].user_id,<EOL>user_list[<NUM_LIT:1>].user_id,<EOL>user_list[<NUM_LIT:2>].user_id<EOL>])<EOL>results = api.user.invoice.search()<EOL>results = api.user.paymentMethod.search()<EOL>
Example #2: An admin logging in. You can tell because we set the API token when we create the instance.
f1764:m1
async def handle(self):
<EOL>listeners = []<EOL>for key, value in self.beat_config.items():<EOL><INDENT>listeners.append(asyncio.ensure_future(<EOL>self.listener(key)<EOL>))<EOL><DEDENT>emitters = []<EOL>for key, value in self.beat_config.items():<EOL><INDENT>emitters.append(asyncio.ensure_future(<EOL>self.emitters(key, value)<EOL>))<EOL><DEDENT>await asyncio.wait(emitters)<EOL>await asyncio.wait(listeners)<EOL>
Listens on all the provided channels and handles the messages.
f1781:c0:m1
async def emitters(self, key, value):
while True:<EOL><INDENT>await asyncio.sleep(value['<STR_LIT>'].total_seconds())<EOL>await self.channel_layer.send(key, {<EOL>"<STR_LIT:type>": value['<STR_LIT:type>'],<EOL>"<STR_LIT:message>": value['<STR_LIT:message>']<EOL>})<EOL><DEDENT>
Single-channel emitter
f1781:c0:m2
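The key names in the snippet are masked, so the following beat_config entry is only an assumed shape consistent with the emitter above: an interval stored as a timedelta plus the 'type' and 'message' forwarded over the channel layer:

from datetime import timedelta

beat_config = {
    "metrics-channel": {
        "type": "metrics.tick",             # consumer handler invoked on the channel
        "message": {"event": "tick"},       # payload delivered to the listener
        "interval": timedelta(seconds=30),  # assumed name of the masked interval key
    }
}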