max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
app/upload/excelparser.py | S3Infosoft/s3-payment-autoaudit | 0 | 12788451 | import re
from datetime import date
import pyexcel
def convert_to_date(datetime: str):
"""Convert the date-time string(dd/mm/yyyy) to date time object"""
if type(datetime) == str:
date_, *_ = datetime.split()
dd, mm, yyyy = [int(val) for val in date_.split("/")]
return date(year=yyyy, month=mm, day=dd)
return datetime
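# Hedged usage sketch (added, not part of the original module): convert_to_date accepts the
# dd/mm/yyyy strings described in the docstring and passes anything else through unchanged.
# Illustrative only:
#   convert_to_date("25/03/2021 14:05:00")  # -> datetime.date(2021, 3, 25)
#   convert_to_date(date(2021, 3, 25))      # -> returned unchanged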
def rename_header(headers: list) -> list:
"""This function is replacing all the column names of the given excel sheet with the field names of the Type8"""
for i in range(len(headers)):
headers[i] = headers[i].replace("Transaction ID", "transaction_id") \
.replace("Value Date", "transaction_value_date") \
.replace("Txn Posted Date", "transaction_posted_date") \
.replace("Description", "mode_of_payment") \
.replace("Cr/Dr", "credit") \
.replace("Transaction Amount(INR)", "transaction_amount")
return headers
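# Illustrative sketch (added): rename_header rewrites and returns the header row in place, e.g.
#   rename_header(["Transaction ID", "Value Date", "Cr/Dr"])
#   # -> ["transaction_id", "transaction_value_date", "credit"]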
def xlparser(xlfile):
"""Parse the excel data coming from forms"""
xl = pyexcel.get_book(file_type="xls", file_content=xlfile)
sheets = tuple(xl.dict.keys()) # get all the sheet names from the excel file
    rows = xl.dict.get(sheets[0])  # get all the rows from the first sheet
    headers = rename_header(rows[6][1:])  # the header row sits at index 6; the first column is skipped
for row in rows[7:]:
data = dict(zip(headers, row[1:]))
data["mode_of_payment"] = (
re.findall(r"RAZORPAY|MSWIPE|CCARD|GOOGLE|AXISROOMS|ICICI|SELF|FINO|MAKEMYTRIP|IBIBO|Paytm",
data.get("mode_of_payment"))[0]
)
data['transaction_value_date'] = convert_to_date(data['transaction_value_date'])
data['transaction_posted_date'] = convert_to_date(data['transaction_posted_date'])
data.pop("ChequeNo.")
yield data
if __name__ == "__main__":
with open("./ICICI_648805052604_sample.xls", "rb") as f:
xlparser(f.read())
| 3.484375 | 3 |
twitter_bot.py | etopuz/twitter-bot-with-selenium | 1 | 12788452 | import time
import pickle
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import os.path
def load_cookies(driver):
for cookie in pickle.load(open("TwitterCookies.pkl", "rb")):
driver.add_cookie(cookie)
def save_cookies(driver):
pickle.dump(driver.get_cookies(), open("TwitterCookies.pkl", "wb"))
# read login details from file
def account_info():
    with open("account_info.txt", "r") as file:
        info = file.read().split()
    email = info[0]
    password = info[1]
    return email, password
def login(driver, email, password):
email_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[1]/label/div/div[2]/div/input'
password_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[2]/label/div/div[2]/div/input'
login_xpath = '/html/body/div/div/div/div[2]/main/div/div/div[2]/form/div/div[3]/div/div/span/span'
time.sleep(3)
driver.find_element_by_xpath(email_xpath).send_keys(email)
time.sleep(0.5)
driver.find_element_by_xpath(password_xpath).send_keys(password)
time.sleep(0.5)
driver.find_element_by_xpath(login_xpath).click()
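# Note (added, not from the original script): the find_element_by_xpath helpers used throughout
# this file were removed in Selenium 4. On a newer Selenium the equivalent call would look like:
#   from selenium.webdriver.common.by import By
#   driver.find_element(By.XPATH, email_xpath).send_keys(email)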
def tweet_picture(author_name, picture_path):
options = Options()
options.add_argument("start-maximized")
driver = webdriver.Firefox(options=options)
driver.get("https://twitter.com/login")
    # check whether the user has logged in before (reuse saved cookies if so)
if os.path.isfile('TwitterCookies.pkl'):
time.sleep(1)
load_cookies(driver)
else:
email, password = account_info()
login(driver, email, password)
save_cookies(driver)
    # XPaths for sharing tweets
tweet_xpath = '/html/body/div/div/div/div[2]/header/div/div/div/div[1]/div[3]/a/div'
message_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[' \
'1]/div/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/div/div[1]/div/div/div/div[' \
'2]/div '
media_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[1]/div/div/div/div/div[2]/div[4]/div/div/div[1]/input'
post_xpath = '/html/body/div/div/div/div[1]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[' \
'1]/div/div/div/div/div[2]/div[4]/div/div/div[2]/div[4]/div/span/span '
# sharing tweet steps
time.sleep(4)
driver.find_element_by_xpath(tweet_xpath).click()
time.sleep(1)
driver.find_element_by_xpath(message_xpath).send_keys(f"Author: {author_name}")
time.sleep(1)
file_upload_button = driver.find_element_by_xpath(media_xpath)
file_upload_button.send_keys(picture_path)
time.sleep(2)
driver.find_element_by_xpath(post_xpath).click()
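# Hedged usage sketch (added; the author name and file path are placeholders). Selenium file
# inputs generally expect an absolute path passed to send_keys:
#   tweet_picture("Some Author", "/absolute/path/to/picture.png")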
| 2.859375 | 3 |
core/hooks/ImageReconstructHook.py | szokejokepu/natural-rws | 0 | 12788453 | from core.argo.core.argoLogging import get_logger
tf_logging = get_logger()
import numpy as np
from core.argo.core.hooks.AbstractImagesReconstructHook import AbstractImagesReconstructHook
from core.argo.core.utils.ImagesSaver import ImagesSaver
class ImagesReconstructHook(AbstractImagesReconstructHook):
def do_when_triggered(self, run_context, run_values):
# tf_logging.info("trigger for ImagesGeneratorHook s" + str(global_step) + " s/e" + str(global_step/global_epoch)+ " e" + str(global_epoch))
tf_logging.info("trigger for ImagesReconstructHook")
self.load_images(run_context.session)
for ds_key in self._images:
images, images_target = self._images[ds_key][1:3]
zs, means = self._model.encode(images, run_context.session)
reconstructed_images_m_sample, reconstructed_images_m_means = self._model.decode(means, run_context.session)
reconstructed_images_z_sample, reconstructed_images_z_means = self._model.decode(zs, run_context.session)
rows = int(np.ceil(len(images) / self._n_images_columns))
panel = [[] for x in range(rows * 6)]
c = 0
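            # Panel layout built below: each group of 6 rows holds, per column, the input image,
            # its target, the decodings of the encoder means (means then sample), and the
            # decodings of the sampled z (means then sample).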
for i in range(0, 6 * rows, 6):
for j in range(self._n_images_columns):
panel[i].append(images[c])
panel[i + 1].append(images_target[c])
panel[i + 2].append(reconstructed_images_m_means[c])
panel[i + 3].append(reconstructed_images_m_sample[c])
panel[i + 4].append(reconstructed_images_z_means[c])
panel[i + 5].append(reconstructed_images_z_sample[c])
if c == len(images) - 1:
break
else:
c = c + 1
# "[1st] original image [2nd] recostructed mean [3rd] reconstr z"
self.images_saver.save_images(panel,
fileName="reconstruction_" + str(ds_key) + "_" + self._time_ref_shortstr + "_" + str(
self._time_ref).zfill(4),
title=self._plot_title,
fontsize=9)
| 2.25 | 2 |
vcue/basics.py | losDaniel/vcue-repo | 0 | 12788454 | import sys
sys.path.append('../')
import bz2, os
import random, string
import importlib
import _pickle as pickle
from datetime import datetime, timedelta
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# OS & list MANAGEMENT FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
def find(name, path):
# Find the file name in any of the directories or sub-directories in the path
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
def getFilepaths(directory):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
def absoluteFilePaths(directory):
'''Get the absolute file path for every file in the given directory'''
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
def import_package_string(package_string):
'''Submit a string argument to be imported as a package (i.e. day_trader.models.LU01_A3). No need to include the .py'''
return importlib.import_module(package_string)
def genrs(length=10):
'''Generate random string'''
return ''.join(random.choices(string.ascii_letters + string.digits, k=length))
def remove_values_from_list(the_list, val):
'''Remove a specific value from a list'''
return [value for value in the_list if value != val]
def chunks(l,n):
'''Break list l up into chunks of size n'''
for i in range(0, len(l), n):
yield l[i:i+n]
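# Illustrative example (added): chunks is a generator, so wrap it in list() to realize it, e.g.
#   list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]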
def sizeFirstBin(data, col, minimum_bin_size, vals=None):
    '''Bin the data based on vals: iterate through each val, assigning its rows to the current bin until that bin reaches minimum_bin_size
__________
parameters
- data : pd.DataFrame
- col : the columns to bin based on
- minimum_bin_size : int. Each bin must have at least this size
- vals : list. Will only bin the values in this list. The default is all the unique values of "col"
'''
if vals is None:
values = sorted(data[col].unique())
else:
values = vals
bins = {}
bin_number = 1
bin_total = 0
vc = dict(data[col].value_counts())
for val in values:
if bin_total<minimum_bin_size:
if bin_number not in bins:
bins[bin_number] = []
bins[bin_number].append(val)
bin_total += vc[val]
else:
bins[bin_number].append(val)
bin_total += vc[val]
else:
bin_number+=1
bins[bin_number] = []
bins[bin_number].append(val)
bin_total = vc[val]
return bins
def nondups(items : list):
'''Return True if list has no duplicate items'''
print('List length:',len(items))
print('Unique items:',len(set(items)))
return len(items) == len(set(items))
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Storage & COMPRESSION FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Article on pickling and compressed pickling functions
# https://betterprogramming.pub/load-fast-load-big-with-compressed-pickles-5f311584507e
def full_pickle(title, data):
    '''Pickle the submitted data and save it under the given title'''
pikd = open(title + '.pickle', 'wb')
pickle.dump(data, pikd)
pikd.close()
def loosen(file):
    '''Load and return a pickled object'''
pikd = open(file, 'rb')
data = pickle.load(pikd)
pikd.close()
return data
def compressed_pickle(title, data):
'''
Pickle a file and then compress it into a file with extension .pbz2
__________
parameters
- title : title of the file you want to save (will be saved with .pbz2 extension automatically)
- data : object you want to save
'''
with bz2.BZ2File(title + '.pbz2', 'w') as f:
pickle.dump(data, f)
def decompress_pickle(filename):
'''filename - file name including .pbz2 extension'''
data = bz2.BZ2File(filename, 'rb')
data = pickle.load(data)
return data
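# Hedged round-trip sketch (added; the file name is a placeholder):
#   compressed_pickle("example_obj", {"a": 1})        # writes example_obj.pbz2
#   restored = decompress_pickle("example_obj.pbz2")  # restored == {"a": 1}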
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Management FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Stuff
def cuttomin(x):
'''Cut a time stamp at the minutes (exclude seconds or more precise)'''
return datetime.strftime(x, '%m-%d %H:%M')
def cuttohrs(x):
'''Cut a time stamp at the hours (exclude minutes or more precise)'''
return datetime.strftime(x, '%m-%d %H')
def cuttodays(x):
'''Cut a time stamp at the date (exclude hour or more precise)'''
return datetime.strftime(x, '%y-%m-%d')
def datetime_range(start, end, delta):
'''Returns the times between start and end in steps of delta'''
current = start
while current < end:
yield current
current += delta
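# Illustrative example (added), using the datetime/timedelta imports above:
#   list(datetime_range(datetime(2020, 1, 1, 8), datetime(2020, 1, 1, 12), timedelta(hours=1)))
#   # -> hourly timestamps at 08:00, 09:00, 10:00 and 11:00 on 2020-01-01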
def prev_weekday(adate):
'''Returns the date of the last weekday before the given date'''
adate -= timedelta(days=1)
while adate.weekday() > 4: # Mon-Fri are 0-4
adate -= timedelta(days=1)
return adate
| 2.921875 | 3 |
jiraexport/issuestream.py | nicwaller/jira-text | 0 | 12788455 | """Fetch JIRA issues from database
Defines a schema using SQLalchemy
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from . import jiraschema
def get_issues(user, password, host="localhost", database="jira"):
"""Get all the JIRA issues from a database.
"""
connstr = 'mysql+pymysql://{}:{}@{}/{}?charset=utf8'.format(user, password, host, database)
engine = sqlalchemy.create_engine(connstr, echo=False)
connection = engine.connect()
connection.execution_options(stream_results=True)
Session = sessionmaker(bind=engine)
session = Session()
# This is a bit of a hack. How else are you supposed to make a progress bar with a generator?
count = session.query(jiraschema.Issue).count()
yield count
for issue in session.query(jiraschema.Issue):
try:
yield issue.as_dict()
except Exception:
# Do not try to attach the sqlalchemy record as extra info. There be dragons.
logging.error("Uncaught exception trying to process a record. Oh well. Too bad.", exc_info=True)
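# Hedged usage sketch (added; the credentials are placeholders). The first value yielded by
# get_issues is the total issue count, which a caller can use to drive a progress bar:
#   issues = get_issues("user", "secret", host="localhost", database="jira")
#   total = next(issues)
#   for issue_dict in issues:
#       pass  # process each issue dict, e.g. dump it to text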
| 2.859375 | 3 |
examples/kernels/sphere/sphere_kernels.py | NoemieJaquier/GaBOflow | 5 | 12788456 | import numpy as np
import gpflow
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import axes3d, Axes3D
from BoManifolds.Riemannian_utils.sphere_utils import logmap
from BoManifolds.kernel_utils.kernels_sphere_tf import SphereGaussianKernel, SphereLaplaceKernel
from BoManifolds.plot_utils.manifold_plots import plot_sphere
plt.rcParams['text.usetex'] = True # use Latex font for plots
plt.rcParams['text.latex.preamble'] = [r'\usepackage{bm}']
"""
This example shows the use of different kernels for the hypersphere manifold S^n, used for Gaussian process regression.
The test function corresponds to a Gaussian distribution with a mean defined on the sphere and a covariance defined on
the tangent space of the mean. Training data are generated "far" from the mean. The trained Gaussian process is then
used to estimate the value of the function at test data sampled around the mean of the test function.
The kernels used are:
- Manifold-RBF kernel (geometry-aware)
- Laplace kernel (geometry-aware)
- Euclidean kernel (classical geometry-unaware)
This example works with GPflow version = 0.5 (used by GPflowOpt).
Authors: <NAME> and <NAME>, 2019
License: MIT
Contact: <EMAIL>, <EMAIL>
"""
def test_function(x, mu_test_function):
# Parameters
sigma_test_fct = np.array([[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]])
inv_sigma_test_fct = np.linalg.inv(sigma_test_fct)
det_sigma_test_fct = np.linalg.det(sigma_test_fct)
# Function value
x_proj = logmap(x, mu_test_function)
return np.exp(- 0.5 * np.dot(x_proj.T, np.dot(inv_sigma_test_fct, x_proj))) / np.sqrt(
(2 * np.pi) ** dim * det_sigma_test_fct)
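# Note (added): test_function evaluates a multivariate Gaussian density in the tangent space at
# mu_test_function: x is first mapped there with logmap, then the usual normal-density formula is
# applied; `dim` is taken from the enclosing script scope when run as __main__.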
def plot_gaussian_process_prediction(figure_handle, mu, test_data, mean_est, mu_test_fct, title):
ax = Axes3D(figure_handle)
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
# ax.view_init(elev=10, azim=-20.) # (default: elev=30, azim=-60)
ax.view_init(elev=10, azim=30.) # (default: elev=30, azim=-60)
# Plot sphere
plot_sphere(ax, alpha=0.4)
# Plot training data on the manifold
plt_scale_fact = test_function(mu_test_fct, mu_test_fct)[0, 0]
nb_data_test = test_data.shape[0]
for n in range(nb_data_test):
ax.scatter(test_data[n, 0], test_data[n, 1], test_data[n, 2], c=pl.cm.inferno(mean_est[n] / plt_scale_fact))
# Plot mean of Gaussian test function
ax.scatter(mu[0], mu[1], mu[2], c='g', marker='D')
plt.title(title, size=25)
if __name__ == "__main__":
np.random.seed(1234)
# Define the test function mean
mu_test_fct = np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0])
# Generate random data on the sphere
nb_data = 20
dim = 3
mean = np.array([1, 0, 0])
mean = mean / np.linalg.norm(mean)
fact_cov = 0.1
cov = fact_cov * np.eye(dim)
data = np.random.multivariate_normal(mean, cov, nb_data)
x_man = data / np.linalg.norm(data, axis=1)[:, None]
y_train = np.zeros((nb_data,1))
for n in range(nb_data):
y_train[n] = test_function(x_man[n], mu_test_fct)
# Generate test data on the sphere
nb_data_test = 10
mean_test = mu_test_fct
    mean_test = mean_test / np.linalg.norm(mean_test)
fact_cov = 0.1
cov_test = fact_cov * np.eye(dim)
data = np.random.multivariate_normal(mean_test, cov_test, nb_data_test)
x_man_test = data / np.linalg.norm(data, axis=1)[:, None]
y_test = np.zeros((nb_data_test, 1))
for n in range(nb_data_test):
y_test[n] = test_function(x_man_test[n], mu_test_fct)
# Plot training data - 3D figure
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man, y_train, mu_test_fct, r'Training data')
# Plot true test data - 3D figure
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, y_test, mu_test_fct, r'Test data (ground truth)')
# ### Gaussian kernel
# Define the kernel
k_gauss = SphereGaussianKernel(input_dim=dim, active_dims=range(dim), beta_min=7.0, beta=10.0, variance=1.)
# Kernel computation
K1 = k_gauss.compute_K_symm(x_man)
K12 = k_gauss.compute_K(x_man, x_man_test)
K2 = k_gauss.compute_K_symm(x_man_test)
# GPR model
m_gauss = gpflow.gpr.GPR(x_man, y_train, kern=k_gauss, mean_function=None)
# Optimization of the model parameters
m_gauss.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_gauss, cov_est_gauss = m_gauss.predict_f_full_cov(x_man_test)
    # mean, cov = m.predict_y(x_new)  # includes noise variance (seems not to be included in the predict_f functions)
var_est_gauss = np.diag(cov_est_gauss[0])[:, None]
# Error computation
error_gauss = np.sqrt(np.sum((y_test - mean_est_gauss) ** 2) / nb_data_test)
print('Estimation error (Manifold-RBF kernel) = ', error_gauss)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_gauss, mu_test_fct, r'Manifold-RBF kernel')
# ### Laplace kernel
# Define the kernel
k_laplace = SphereLaplaceKernel(input_dim=dim, active_dims=range(dim), beta=10.0, variance=1.)
# Kernel computation
K1 = k_laplace.compute_K_symm(x_man)
K12 = k_laplace.compute_K(x_man, x_man_test)
K2 = k_laplace.compute_K_symm(x_man_test)
# GPR model
m_laplace = gpflow.gpr.GPR(x_man, y_train, kern=k_laplace, mean_function=None)
# Optimization of the model parameters
m_laplace.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_laplace, cov_est_laplace = m_laplace.predict_f_full_cov(x_man_test)
    # mean, cov = m.predict_y(x_new)  # includes noise variance (seems not to be included in the predict_f functions)
var_est_laplace = np.diag(cov_est_laplace[0])[:, None]
# Error computation
error_laplace = np.sqrt(np.sum((y_test - mean_est_laplace) ** 2) / nb_data_test)
print('Estimation error (Laplace kernel) = ', error_laplace)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_laplace, mu_test_fct, r'Laplace kernel')
# ### Euclidean RBF
# Define the kernel
k_eucl = gpflow.kernels.RBF(input_dim=dim, ARD=False)
# Kernel computation
K1 = k_eucl.compute_K_symm(x_man)
K12 = k_eucl.compute_K(x_man, x_man_test)
K2 = k_eucl.compute_K_symm(x_man_test)
# GPR model
m_eucl = gpflow.gpr.GPR(x_man, y_train, kern=k_eucl, mean_function=None)
# Optimization of the model parameters
m_eucl.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_eucl, cov_est_eucl = m_eucl.predict_f_full_cov(x_man_test)
    # mean, cov = m_eucl.predict_y(x_new)  # includes noise variance (seems not to be included in the predict_f functions)
var_est_eucl = np.diag(cov_est_eucl[0])[:, None]
# Error computation
error_eucl = np.sqrt(np.sum((y_test - mean_est_eucl) ** 2) / nb_data_test)
print('Estimation error (Euclidean-RBF kernel) = ', error_eucl)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_eucl, mu_test_fct, r'Euclidean-RBF kernel')
plt.show()
| 2.390625 | 2 |
lintuasema-backend/app/api/classes/location/views.py | Lintuasemasovellus/lintuasemasovellus | 0 | 12788457 | from flask import render_template, request, redirect, url_for, jsonify, json
from flask_login import login_required
from app.api.classes.location.models import Location
from app.api.classes.observatory.models import Observatory
from app.api.classes.type.models import Type
from app.api.classes.observatory.services import getAll, getObservatoryId
from app.api.classes.location.services import getLocationsAndTypes, getAllLocations, editLocation
from app.api import bp
from app.db import db
import os
import json
@bp.route('/api/addLocation', methods=['POST'])
@login_required
def add_location():
req = request.get_json()
location = Location(name=req['name'], observatory_id=req['observatory_id'])
db.session().add(location)
db.session().commit()
return req
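# Hedged example (added): the endpoint above expects a JSON body of the form
#   {"name": "some location", "observatory_id": 1}
# where the values shown are placeholders.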
@bp.route('/api/getLocationsAndTypes/<observatory_name>', methods=['GET'])
#@login_required
def list_locations(observatory_name):
res = getLocationsAndTypes(observatory_name)
return res
@bp.route('/api/getLocations/', methods=['GET'])
#@login_required
def get_all_locations():
res =getAllLocations()
return res
# preliminary route for editing a location's name
@bp.route("/api/edit/<observatoryname>/<locationname>", methods=["POST"])
@login_required
def edit_location(observatoryname, locationname):
req = editLocation(observatoryname, locationname)
return req | 2.484375 | 2 |
cqu_jxgl/cli.py | zombie110year/cqu_jxgl | 0 | 12788458 | import re
from datetime import datetime
from getpass import getpass
from sys import argv
from . import __version__
from .data.time import 沙坪坝校区作息时间, 虎溪校区作息时间
from .app import App
class CommandParser:
def __init__(self):
self.username = input("用户名: ").strip()
self.password = getpass("密码: ").strip()
self.term = self.getTerm()
self.startdate = self.getStartDate()
self.作息时间 = self.get作息时间()
def getTerm(self) -> int:
term = input("学期号: ").strip()
if re.fullmatch(r"(?P<year>\d{4,})(?P<term>\d)", term):
return term
else:
raise ValueError(f"{term} 不是一个有效的学期号, 应为类似于 20190 这样的数字")
def getStartDate(self) -> datetime:
date = input("学期开始日期: ").strip()
m = re.fullmatch(
r"(?P<year>\d{4,})(?P<month>\d{2})(?P<day>\d{2})", date)
if m:
year = int(m["year"])
mon = int(m["month"])
day = int(m["day"])
date = datetime(year, mon, day)
return date
else:
raise ValueError(f"{date} 不是有效的日期号,应为类似于 20190101 这样的数字")
def get作息时间(self) -> dict:
print("选择作息时间:")
print("1) 沙坪坝校区")
print("2) 虎溪校区")
code = int(input("1|2> ").strip())
assert code in [1, 2]
choice = {
1: 沙坪坝校区作息时间,
2: 虎溪校区作息时间
}[code]
return choice
@staticmethod
def help():
print((
"Usage: cqu_schedule\n"
" 登录并获取学生课程表 ics 文件"
))
def main():
if len(argv) == 1:
args = CommandParser()
app = App(username=args.username, password=args.password)
app.writeICS(args.term, args.startdate, args.作息时间)
else:
CommandParser.help()
| 3.21875 | 3 |
src/crawler.py | qiuhy/dl | 0 | 12788459 |
# -*- coding: utf-8 -*-
"""
Created on 2017-05-05
@author: hy_qiu
"""
import json
import logging
import os
import urllib.parse
import urllib.request
from src.util.wraps import retry
TIMEOUT = 15
def file_copy2path(fn, pn):
    if not os.path.exists(fn) or not os.path.isfile(fn):
        return ''
    if not os.path.exists(pn):
        os.makedirs(pn)
    nfn = os.path.join(pn, os.path.split(fn)[1])
    with open(fn, "rb") as src, open(nfn, "wb") as dst:
        dst.write(src.read())
    return nfn
def file_move2path(fn, pn):
nfn = file_copy2path(fn, pn)
if nfn != '':
os.remove(fn)
return nfn
def get_response(url, values=None):
host = url.split('/')[2].strip()
headers = {'Host': host,
'User-Agent': 'Mozilla/5.0'}
if values:
postdata = urllib.parse.urlencode(values).encode('utf-8')
else:
postdata = None
request = urllib.request.Request(url, postdata, headers)
response = urllib.request.urlopen(request, timeout=TIMEOUT)
return response
def get_read(url, values=None, charset=None):
r = get_response(url, values)
if charset is None:
ctv = r.getheader('Content-Type')
if ctv:
# 'Content-Type application/json; charset=utf-8'
for t in ctv.split(';'):
if t.strip().lower().startswith('charset='):
charset = t.strip()[8:]
break
if charset:
return r.read().decode(charset)
else:
return r.read()
def get_json(url, values=None, charset=None):
return json.loads(get_read(url, values, charset))
class Crawler:
def __init__(self, host='', code='', name='', logger=None):
self.host = host
self.code = code
self.name = name
if logger:
self.logger = logger
else:
self.logger = logging.getLogger(code)
self.logger.setLevel(logging.DEBUG)
fh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s-%(levelname)-8s %(message)s')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
return
def get_hosturl(self, url):
if self.host is None or self.host == '':
return url
else:
return 'http://{}/{}'.format(self.host, url)
@retry()
def get_response(self, url, values=None):
return get_response(self.get_hosturl(url), values)
@retry()
def get_json(self, url, values=None, charset=None):
return get_json(self.get_hosturl(url), values, charset)
@retry()
def get_read(self, url, values=None, charset=None):
return get_read(self.get_hosturl(url), values, charset)
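# Hedged usage sketch (added; the host and endpoint are placeholders):
#   c = Crawler(host="example.com", code="demo", name="demo crawler")
#   payload = c.get_json("api/items")   # resolves to http://example.com/api/items
#   c.logger.info("fetched %s", payload)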
| 2.71875 | 3 |
results/urls.py | Wellheor1/l2 | 10 | 12788460 | from django.urls import path
from django.views.generic import TemplateView
from . import views
urlpatterns = [
path('search/directions', views.results_search_directions),
path('enter', views.enter),
path('get', views.result_get),
path('pdf', views.result_print),
path('preview', TemplateView.as_view(template_name='dashboard/results_preview.html')),
path('results', TemplateView.as_view(template_name='dashboard/results.html')),
path('journal', views.result_journal_print),
path('journal_table', views.result_journal_table_print),
path('filter', views.result_filter),
path('day', views.get_day_results),
]
| 1.789063 | 2 |
VoidFinder/vast/voidfinder/volume_cut.py | DESI-UR/VAST | 5 | 12788461 |
#imports
import numpy as np
import os
import sys
import mmap
import struct
import socket
import select
import atexit
import signal
import tempfile
import multiprocessing
from psutil import cpu_count
from astropy.table import Table
from multiprocessing import Queue, Process, RLock, Value, Array
from ctypes import c_int64, c_double, c_float
from .voidfinder_functions import in_mask, not_in_mask
from .hole_combine import spherical_cap_volume
from ._voidfinder_cython_find_next import not_in_mask as nim_cython
from ._vol_cut_cython import _check_holes_mask_overlap, _check_holes_mask_overlap_2
from ._voidfinder import process_message_buffer
import time
# function to find which spheres stick out of the mask
def max_range_check(spheres_table, direction, sign, survey_mask, mask_resolution, r_limits):
    '''
    Given the list of potential hole locations and their radii in spheres_table,
    and an axis (x, y, or z) and a direction (+/-), add the radius of each hole to
    its location along that axis and check whether the resulting point is within
    the mask.
    Returns a boolean array of length N where True indicates the location is valid.
    '''
#print("Max Range Check", direction, sign, "hole_table ID: ", id(spheres_table))
#print(spheres_table['x'][0])
if sign == '+':
spheres_table[direction] += spheres_table['radius']
else:
spheres_table[direction] -= spheres_table['radius']
#print(spheres_table['x'][0])
#print(spheres_table)
boolean = in_mask(spheres_table, survey_mask, mask_resolution, r_limits)
return boolean
def check_coordinates(coord, direction, sign, survey_mask, mask_resolution, r_limits):
dr = 0
check_coord = coord
#mask_check = True
mask_check2 = False
#mask_check3 = False
#print(id(check_coord), id(coord))
np_check_coord = np.empty((1,3), dtype=np.float64)
np_check_coord[0,0] = coord['x']
np_check_coord[0,1] = coord['y']
np_check_coord[0,2] = coord['z']
if direction == 'x':
np_dir = 0
elif direction == 'y':
np_dir = 1
elif direction == 'z':
np_dir = 2
#out_log = open("VF_DEBUG_volume_cut.txt", 'a')
#while dr < coord['radius'] and mask_check:
while dr < coord['radius'] and not mask_check2:
dr += 1
if sign == '+':
# check_coord[direction] = coord[direction] + dr
np_check_coord[0,np_dir] = np_check_coord[0,np_dir] + dr
else:
# check_coord[direction] = coord[direction] - dr
np_check_coord[0,np_dir] = np_check_coord[0,np_dir] - dr
#mask_check = in_mask(check_coord, survey_mask, mask_resolution, r_limits)
mask_check2 = nim_cython(np_check_coord, survey_mask, mask_resolution, r_limits[0], r_limits[1])
#mask_check3 = not_in_mask(np_check_coord, survey_mask, mask_resolution, r_limits[0], r_limits[1])
#if mask_check == mask_check3: # or \
# mask_check != mask_check3 or \
#if mask_check2 != mask_check3:
#out_log.write(str(check_coord)+"\n")
#out_log.write(str(np_check_coord)+","+str(mask_check)+","+str(mask_check2)+","+str(mask_check3)+"\n")
#out_log.close()
height_i = check_coord['radius'] - dr
cap_volume_i = spherical_cap_volume(check_coord['radius'], height_i)
sphere_volume = np.pi*(4/3)*(check_coord['radius']**3)
return cap_volume_i, sphere_volume
def volume_cut(hole_table, survey_mask, mask_resolution, r_limits):
#print("Vol cut hole_table ID: ", id(hole_table))
#print(hole_table['x'][0])
# xpos, xneg, etc are True when the hole center + hole_radius in that direction
# is within the mask
xpos = max_range_check(Table(hole_table), 'x', '+', survey_mask, mask_resolution, r_limits)
xneg = max_range_check(Table(hole_table), 'x', '-', survey_mask, mask_resolution, r_limits)
ypos = max_range_check(Table(hole_table), 'y', '+', survey_mask, mask_resolution, r_limits)
yneg = max_range_check(Table(hole_table), 'y', '-', survey_mask, mask_resolution, r_limits)
zpos = max_range_check(Table(hole_table), 'z', '+', survey_mask, mask_resolution, r_limits)
zneg = max_range_check(Table(hole_table), 'z', '-', survey_mask, mask_resolution, r_limits)
comb_bool = np.logical_and.reduce((xpos, xneg, ypos, yneg, zpos, zneg))
#print("Comb bool: ", np.sum(comb_bool))
false_indices = np.where(comb_bool == False)
out_spheres_indices = []
for i in false_indices[0]:
not_removed = True
coord = hole_table[i]
# Check x-direction
if not xpos[i]:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'x', '+', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
elif xneg[i] == False and not_removed:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'x', '-', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
# Check y-direction
if ypos[i] == False and not_removed:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'y', '+', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
elif yneg[i] == False and not_removed:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'y', '-', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
# Check z-direction
if zpos[i] == False and not_removed:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'z', '+', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
elif zneg[i] == False and not_removed:
cap_volume, sphere_volume = check_coordinates(Table(coord), 'z', '-', survey_mask, mask_resolution, r_limits)
if cap_volume > 0.1*sphere_volume:
out_spheres_indices.append(i)
not_removed = False
out_spheres_indices = np.unique(out_spheres_indices)
if len(out_spheres_indices) > 0:
hole_table.remove_rows(out_spheres_indices)
return hole_table
def check_hole_bounds(x_y_z_r_array,
mask,
mask_resolution,
r_limits,
cut_pct=0.1,
pts_per_unit_volume=3,
num_surf_pts=20,
num_cpus=1,
verbose=0):
"""
Description
===========
Remove holes from the output of _hole_finder() whose volume falls outside
of the mask by X % or more.
This is accomplished by a 2-phase approach, first, N points are distributed
on the surface of each sphere, and those N points are checked against the
mask. If any of those N points fall outside the mask, the percentage of the
volume of the sphere which falls outside the mask is calculated by using a
monte-carlo-esque method whereby the hole in question is filled with points
corresponding to some minimum density, and each of those points is checked.
The percentage of volume outside the mask is then approximated as the
percentage of those points which fall outside the mask.
Parameters
==========
x_y_z_r_array : numpy.ndarray of shape (N,4)
x,y,z locations of the holes, and radius, in that order
mask : numpy.ndarray of shape (K,L) dtype np.uint8
the mask used, mask[ra_integer,dec_integer] returns True if that ra,dec
position is within the survey, and false if it is not. Note ra,dec must
be converted into integer values depending on the mask_resolution. For
mask_resolution of 1, ra is in [0,359] and dec in [-90,90], for
mask_resolution of 2, ra is in [0,719], dec in [-180,180] etc.
mask_resolution : int
value of 1 indicates each entry in the mask accounts for 1 degree, value
of 2 means half-degree, 4 means quarter-degree increments, etc
r_limits : 2-tuple (min_r, max_r)
min and max radius limits of the survey
cut_pct : float in [0,1)
        if more than this fraction of a hole's volume falls outside the mask,
        discard that hole
num_surf_pts : int
distribute this many points on the surface of each sphere and check them
against the mask before doing the monte-carlo volume calculation.
num_cpus : int
number of processes to use
Returns
=======
valid_index : numpy.ndarray shape (N,)
boolean array of length corresponding to input x_y_z_r_array
True if hole is within bounds, False is hole falls outside
the mask too far based on the cut_pct criteria
monte_index : numpy.ndarray of shape (N,)
boolean array - True if the current point underwent
the additional monte-carlo analysis, and False if all the points
on the shell were inside the mask and therefore no volume
analysis was necessary
"""
if num_cpus == 1:
valid_index, monte_index = oob_cut_single(x_y_z_r_array,
mask,
mask_resolution,
r_limits,
cut_pct,
pts_per_unit_volume,
num_surf_pts)
else:
valid_index, monte_index = oob_cut_multi(x_y_z_r_array,
mask,
mask_resolution,
r_limits,
cut_pct,
pts_per_unit_volume,
num_surf_pts,
num_cpus,
verbose=verbose)
return valid_index, monte_index
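# Hedged usage sketch (added; the hole array, mask and limits below are placeholders):
#   holes_xyzr = np.array([[10.0, 20.0, 30.0, 5.0]])   # one hole: x, y, z, radius
#   keep, used_monte = check_hole_bounds(holes_xyzr, mask, mask_resolution,
#                                        (r_min, r_max), cut_pct=0.1, num_cpus=1)
#   holes_xyzr = holes_xyzr[keep]                       # drop holes hanging too far outside the mask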
def oob_cut_single(x_y_z_r_array,
mask,
mask_resolution,
r_limits,
cut_pct,
pts_per_unit_volume,
num_surf_pts):
"""
Out-Of-Bounds cut single threaded version.
"""
valid_index = np.ones(x_y_z_r_array.shape[0], dtype=np.uint8)
monte_index = np.zeros(x_y_z_r_array.shape[0], dtype=np.uint8)
############################################################################
    # Distribute N points on a unit sphere
# Reference algorithm "Golden Spiral" method:
# https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
#---------------------------------------------------------------------------
indices = np.arange(0, num_surf_pts, dtype=float) + 0.5
phi = np.arccos(1 - 2*indices/num_surf_pts)
theta = np.pi * (1 + 5**0.5) * indices
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
unit_sphere_pts = np.empty((num_surf_pts, 3), dtype=np.float64)
unit_sphere_pts[:,0] = x
unit_sphere_pts[:,1] = y
unit_sphere_pts[:,2] = z
############################################################################
############################################################################
# Find the largest radius hole in the results, and generate a mesh of
# constant density such that the largest hole will fit in this mesh
#
# Cut the extraneous points, and sort all the points in order of smallest
# radius to largest radius so when we iterate later for the smaller holes we
# can stop early at the largest necessary radius - the cythonized code
# critically depends on this sort
#---------------------------------------------------------------------------
largest_radius = x_y_z_r_array[:,3].max()
gen_radius = largest_radius*1.05 #add a bit of margin for the mesh
step = 1.0/np.power(pts_per_unit_volume, .33)
mesh_pts = np.arange(-1.0*gen_radius, gen_radius, step)
n_pts = mesh_pts.shape[0]
mesh_x, mesh_y, mesh_z = np.meshgrid(mesh_pts, mesh_pts, mesh_pts)
mesh_points = np.concatenate((mesh_x.ravel().reshape(n_pts**3, 1),
mesh_y.ravel().reshape(n_pts**3, 1),
mesh_z.ravel().reshape(n_pts**3, 1)), axis=1)
mesh_point_radii = np.linalg.norm(mesh_points, axis=1)
keep_idx = mesh_point_radii < largest_radius
mesh_points = mesh_points[keep_idx]
mesh_point_radii = mesh_point_radii[keep_idx]
sort_order = mesh_point_radii.argsort()
mesh_points = mesh_points[sort_order]
mesh_points_radii = mesh_point_radii[sort_order]
############################################################################
############################################################################
# Iterate through our holes
#---------------------------------------------------------------------------
_check_holes_mask_overlap(x_y_z_r_array,
#_check_holes_mask_overlap_2(x_y_z_r_array,
mask,
mask_resolution,
r_limits[0],
r_limits[1],
unit_sphere_pts,
mesh_points,
mesh_points_radii,
cut_pct,
valid_index,
monte_index)
############################################################################
'''
for idx, curr_hole in enumerate(x_y_z_r_array):
#if idx%100 == 0:
# print(idx)
curr_hole_position = curr_hole[0:3]
curr_hole_radius = curr_hole[3]
################################################################################
# First, check the shell points to see if we need to do the monte carlo
# volume
################################################################################
curr_sphere_pts = curr_hole_radius*unit_sphere_pts + curr_hole_position
require_monte_carlo = False
for curr_sphere_edge_pt in curr_sphere_pts:
not_in_mask = nim_cython(curr_sphere_edge_pt.reshape(1,3), mask, mask_resolution, r_limits[0], r_limits[1])
if not_in_mask:
require_monte_carlo = True
break
################################################################################
# Do the monte carlo if any of the shell points failed
################################################################################
if require_monte_carlo:
#print("REQ MONT")
monte_index[idx] = True
total_checked_pts = 0
total_outside_mask = 0
for jdx, (mesh_pt, mesh_pt_radius) in enumerate(zip(mesh_points, mesh_points_radii)):
if mesh_pt_radius > curr_hole_radius:
break
check_pt = curr_hole_position + mesh_pt
not_in_mask = nim_cython(check_pt.reshape(1,3), mask, mask_resolution, r_limits[0], r_limits[1])
if not_in_mask:
total_outside_mask += 1
total_checked_pts += 1
vol_pct_outside = float(total_outside_mask)/float(total_checked_pts)
if vol_pct_outside > cut_pct:
valid_index[idx] = False
else:
#do nothing, the hole is valid
pass
'''
    return valid_index.astype(bool), monte_index.astype(bool)
def oob_cut_multi(x_y_z_r_array,
mask,
mask_resolution,
r_limits,
cut_pct,
pts_per_unit_volume,
num_surf_pts,
num_cpus,
batch_size=1000,
verbose=0,
print_after=5.0,
SOCKET_PATH="/tmp/voidfinder2.sock",
RESOURCE_DIR="/dev/shm"):
"""
Out-Of-Bounds cut multi processed version.
"""
num_holes = x_y_z_r_array.shape[0]
valid_index = np.ones(num_holes, dtype=np.uint8)
monte_index = np.zeros(num_holes, dtype=np.uint8)
############################################################################
    # Distribute N points on a unit sphere
# Reference algorithm "Golden Spiral" method:
# https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
############################################################################
indices = np.arange(0, num_surf_pts, dtype=float) + 0.5
phi = np.arccos(1 - 2*indices/num_surf_pts)
theta = np.pi * (1 + 5**0.5) * indices
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
unit_sphere_pts = np.empty((num_surf_pts, 3), dtype=np.float64)
unit_sphere_pts[:,0] = x
unit_sphere_pts[:,1] = y
unit_sphere_pts[:,2] = z
############################################################################
# Find the largest radius hole in the results, and generate a mesh of
# constant density such that the largest hole will fit in this mesh
#
# Cut the extraneous points, and sort all the points in order of smallest
# radius to largest radius so when we iterate later for the smaller holes
# we can stop early at the largest necessary radius
############################################################################
largest_radius = x_y_z_r_array[:,3].max()
gen_radius = largest_radius*1.05 #add a bit of margin for the mesh
step = 1.0/np.power(pts_per_unit_volume, .33)
mesh_pts = np.arange(-1.0*gen_radius, gen_radius, step)
n_pts = mesh_pts.shape[0]
mesh_x, mesh_y, mesh_z = np.meshgrid(mesh_pts, mesh_pts, mesh_pts)
mesh_points = np.concatenate((mesh_x.ravel().reshape(n_pts**3, 1),
mesh_y.ravel().reshape(n_pts**3, 1),
mesh_z.ravel().reshape(n_pts**3, 1)), axis=1)
mesh_points_radii = np.linalg.norm(mesh_points, axis=1)
keep_idx = mesh_points_radii < largest_radius
mesh_points = mesh_points[keep_idx]
mesh_points_radii = mesh_points_radii[keep_idx]
sort_order = mesh_points_radii.argsort()
mesh_points = mesh_points[sort_order]
mesh_points_radii = mesh_points_radii[sort_order]
mesh_points = mesh_points.astype(np.float64)
mesh_points_radii = mesh_points_radii.astype(np.float64)
num_mesh_points = mesh_points.shape[0]
############################################################################
# If /dev/shm is not available, use /tmp as the shared resource filesystem
# location instead. Since on Linux /dev/shm is guaranteed to be a mounted
# RAMdisk, I don't know if /tmp will be as fast or not, probably depends on
# kernel settings.
############################################################################
if not os.path.isdir(RESOURCE_DIR):
print("WARNING: RESOURCE DIR ", RESOURCE_DIR, "does not exist. Falling back to /tmp but could be slow", flush=True)
RESOURCE_DIR = "/tmp"
############################################################################
# Start by converting the num_cpus argument into the real value we will use
# by making sure its reasonable, or if it was none use the max val available
#
# Maybe should use psutil.cpu_count(logical=False) instead of the
# multiprocessing version?
############################################################################
if (num_cpus is None):
num_cpus = cpu_count(logical=False)
if verbose > 0:
print("Running hole cut in multi-process mode,", str(num_cpus), "cpus", flush=True)
############################################################################
#
############################################################################
xyzr_fd, XYZR_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("XYZR MEMMAP PATH: ", XYZR_BUFFER_PATH, xyzr_fd, flush=True)
xyzr_buffer_length = num_holes*4*8 # n by 4 by 8 per float64
os.ftruncate(xyzr_fd, xyzr_buffer_length)
xyzr_buffer = mmap.mmap(xyzr_fd, xyzr_buffer_length)
xyzr_buffer.write(x_y_z_r_array.tobytes())
del x_y_z_r_array
x_y_z_r_array = np.frombuffer(xyzr_buffer, dtype=np.float64)
x_y_z_r_array.shape = (num_holes,4)
os.unlink(XYZR_BUFFER_PATH)
############################################################################
#
# Memmaps for valid_idx, monte_idx, unit_sphere_pts, mesh_points, mesh_points_radii
# and x_y_z_r_array
#
#
############################################################################
valid_idx_fd, VALID_IDX_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("VALID_IDX MEMMAP PATH: ", VALID_IDX_BUFFER_PATH, valid_idx_fd, flush=True)
valid_idx_buffer_length = num_holes*1 # 1 per uint8
os.ftruncate(valid_idx_fd, valid_idx_buffer_length)
valid_idx_buffer = mmap.mmap(valid_idx_fd, valid_idx_buffer_length)
valid_idx_buffer.write(valid_index.tobytes())
del valid_index
valid_index = np.frombuffer(valid_idx_buffer, dtype=np.uint8)
valid_index.shape = (num_holes,)
os.unlink(VALID_IDX_BUFFER_PATH)
############################################################################
#
############################################################################
monte_idx_fd, MONTE_IDX_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("MONTE_IDX MEMMAP PATH: ", MONTE_IDX_BUFFER_PATH, monte_idx_fd, flush=True)
monte_idx_buffer_length = num_holes*1 # 1 per uint8
os.ftruncate(monte_idx_fd, monte_idx_buffer_length)
monte_idx_buffer = mmap.mmap(monte_idx_fd, monte_idx_buffer_length)
monte_idx_buffer.write(monte_index.tobytes())
del monte_index
monte_index = np.frombuffer(monte_idx_buffer, dtype=np.uint8)
monte_index.shape = (num_holes,)
os.unlink(MONTE_IDX_BUFFER_PATH)
############################################################################
#
############################################################################
unit_sphere_fd, UNIT_SHELL_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("UNIT SHELL MEMMAP PATH: ", UNIT_SHELL_BUFFER_PATH, unit_sphere_fd, flush=True)
unit_sphere_buffer_length = num_surf_pts*3*8 # n by 3 by 8 per float64
os.ftruncate(unit_sphere_fd, unit_sphere_buffer_length)
unit_sphere_buffer = mmap.mmap(unit_sphere_fd, unit_sphere_buffer_length)
unit_sphere_buffer.write(unit_sphere_pts.tobytes())
del unit_sphere_pts
unit_sphere_pts = np.frombuffer(unit_sphere_buffer, dtype=np.float64)
unit_sphere_pts.shape = (num_surf_pts, 3)
os.unlink(UNIT_SHELL_BUFFER_PATH)
############################################################################
#
############################################################################
mesh_pts_fd, MESH_PTS_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("MESH PTS MEMMAP PATH: ", MESH_PTS_BUFFER_PATH, mesh_pts_fd, flush=True)
mesh_pts_buffer_length = num_mesh_points*3*8 # n by 3 by 8 per float64
os.ftruncate(mesh_pts_fd, mesh_pts_buffer_length)
mesh_pts_buffer = mmap.mmap(mesh_pts_fd, mesh_pts_buffer_length)
mesh_pts_buffer.write(mesh_points.tobytes())
del mesh_points
mesh_points = np.frombuffer(mesh_pts_buffer, dtype=np.float64)
mesh_points.shape = (num_mesh_points, 3)
os.unlink(MESH_PTS_BUFFER_PATH)
############################################################################
#
############################################################################
mesh_radii_fd, MESH_RADII_BUFFER_PATH = tempfile.mkstemp(prefix="voidfinder", dir=RESOURCE_DIR, text=False)
if verbose > 0:
print("MESH RADII MEMMAP PATH: ", MESH_RADII_BUFFER_PATH, mesh_radii_fd, flush=True)
mesh_radii_buffer_length = num_mesh_points*8 # n by 3 by 8 per float64
os.ftruncate(mesh_radii_fd, mesh_radii_buffer_length)
mesh_radii_buffer = mmap.mmap(mesh_radii_fd, mesh_radii_buffer_length)
if verbose > 0:
print(mesh_radii_buffer_length, len(mesh_points_radii.tobytes()), flush=True)
mesh_radii_buffer.write(mesh_points_radii.tobytes())
del mesh_points_radii
mesh_points_radii = np.frombuffer(mesh_radii_buffer, dtype=np.float64)
mesh_points_radii.shape = (num_mesh_points,)
os.unlink(MESH_RADII_BUFFER_PATH)
############################################################################
#
############################################################################
index_start = Value(c_int64, 0, lock=True)
num_cells_processed = 0
############################################################################
#
############################################################################
config_object = {"SOCKET_PATH" : SOCKET_PATH,
"batch_size" : batch_size,
"mask" : mask,
"mask_resolution" : mask_resolution,
"min_dist" : r_limits[0],
"max_dist" : r_limits[1],
"cut_pct" : cut_pct,
"XYZR_BUFFER_PATH" : XYZR_BUFFER_PATH,
"xyzr_fd" : xyzr_fd,
"num_holes" : num_holes,
"VALID_IDX_BUFFER_PATH" : VALID_IDX_BUFFER_PATH,
"valid_idx_fd" : valid_idx_fd,
"MONTE_IDX_BUFFER_PATH" : MONTE_IDX_BUFFER_PATH,
"monte_idx_fd" : monte_idx_fd,
"UNIT_SHELL_BUFFER_PATH" : UNIT_SHELL_BUFFER_PATH,
"unit_sphere_fd" : unit_sphere_fd,
"num_surf_pts" : num_surf_pts,
"MESH_PTS_BUFFER_PATH" : MESH_PTS_BUFFER_PATH,
"mesh_pts_fd" : mesh_pts_fd,
"num_mesh_points" : num_mesh_points,
"MESH_RADII_BUFFER_PATH" : MESH_RADII_BUFFER_PATH,
"mesh_radii_fd" : mesh_radii_fd,
}
############################################################################
# Start the worker processes
#
# For whatever reason, OSX doesn't define the socket.SOCK_CLOEXEC constants
# so check for that attribute on the socket module before opening the
# listener socket. Not super critical, but the child processes don't need a
# file descriptor for the listener socket so I was trying to be clean and
# have it "close on exec"
############################################################################
if hasattr(socket, "SOCK_CLOEXEC"):
listener_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_CLOEXEC)
else:
listener_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
listener_socket.bind(SOCKET_PATH)
listener_socket.listen(num_cpus)
startup_context = multiprocessing.get_context("fork")
processes = []
for proc_idx in range(num_cpus):
#p = startup_context.Process(target=_main_hole_finder_startup, args=(proc_idx, CONFIG_PATH))
p = startup_context.Process(target=_oob_cut_worker,
args=(proc_idx,
index_start,
config_object))
'''
p = startup_context.Process(target=_hole_finder_worker_profile,
args=(proc_idx,
ijk_start,
write_start,
config_object))
'''
p.start()
processes.append(p)
worker_start_time = time.time()
############################################################################
# Make sure each worker process connects to the main socket, so we block on
# the accept() call below until we get a connection, and make sure we get
# exactly num_cpus connections.
#
# To avoid waiting for hours and hours without getting a successful socket
# connection, we set the timeout to the reasonably high value of 10.0 seconds
# (remember, 0.1 seconds is on the order of 100 million cycles for a 1GHz
# processor), and if we don't get a connection within that time frame we're
# going to intentionally raise a RunTimeError
#
# If successful, we save off references to our new worker sockets by their
# file descriptor integer value so we can refer to them by that value using
# select() later, then shut down and close our listener/server socket since
# we're done with it.
############################################################################
if verbose > 0:
print("Attempting to connect workers for volume cut/out of bounds check",
flush=True)
num_active_processes = 0
worker_sockets = []
message_buffers = []
socket_index = {}
all_successful_connections = True
listener_socket.settimeout(10.0)
for idx in range(num_cpus):
try:
worker_sock, worker_addr = listener_socket.accept()
except:
all_successful_connections = False
break
worker_sockets.append(worker_sock)
num_active_processes += 1
message_buffers.append(b"")
socket_index[worker_sock.fileno()] = idx
if verbose > 0:
if all_successful_connections:
print("Worker processes time to connect:",
time.time() - worker_start_time,
flush=True)
# This try-except clause was added for weird behavior on mac/OSX
try:
listener_socket.shutdown(socket.SHUT_RDWR)
except:
pass
listener_socket.close()
os.unlink(SOCKET_PATH)
if not all_successful_connections:
for worker_sock in worker_sockets:
worker_sock.send(b"exit")
print("FAILED TO CONNECT ALL WORKERS SUCCESSFULLY, EXITING")
        raise RuntimeError("Worker sockets failed to connect properly")
############################################################################
# LOOP TO LISTEN FOR RESULTS WHILE WORKERS WORKING
# This loop has 3 primary jobs
# 1). accumulate results from reading the worker sockets
# 2). periodically print the status/results from the workers
# 3). Save checkpoint files after every 'safe_after' results
############################################################################
if verbose > 0:
print_after_time = time.time()
main_task_start_time = time.time()
empty1 = []
empty2 = []
select_timeout = 2.0
    sent_exit_commands = False
    num_acknowledges = 0
while num_active_processes > 0:
########################################################################
# Print status updates if verbose is on
########################################################################
if verbose > 0:
curr_time = time.time()
if (curr_time - print_after_time) > print_after:
print('Processed', num_cells_processed,
'holes of', num_holes,
"at", str(round(curr_time-main_task_start_time,2)),
flush=True)
print_after_time = curr_time
########################################################################
# Accumulate status updates from the worker sockets
########################################################################
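        # (Added note) Each worker message begins with a native-order int64 type field decoded via
        # struct.unpack("=q", ...): type 0 carries the number of holes processed in bytes 8:16,
        # type 1 marks a worker as finished, and type 2 is an acknowledgement.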
read_socks, empty3, empty4 = select.select(worker_sockets, empty1, empty2, select_timeout)
if read_socks:
for worker_sock in read_socks:
sock_idx = socket_index[worker_sock.fileno()]
curr_read = worker_sock.recv(1024)
curr_message_buffer = message_buffers[sock_idx]
curr_message_buffer += curr_read
messages, remaining_buffer = process_message_buffer(curr_message_buffer)
message_buffers[sock_idx] = remaining_buffer
for message in messages:
if message == b"":
continue
message_type = struct.unpack("=q", message[0:8])[0]
if message_type == 0:
num_result = struct.unpack("=q", message[8:16])[0]
#num_hole = struct.unpack("=q", message[16:24])[0]
num_cells_processed += num_result
#if ENABLE_SAVE_MODE:
# save_after_counter -= num_result
#n_holes += num_hole
elif message_type == 1:
num_active_processes -= 1
elif message_type == 2:
num_acknowledges += 1
############################################################################
# We're done the main work! Clean up worker processes. Block until we've
# joined everybody so that we know everything completed correctly.
############################################################################
if verbose > 0:
print("Vol cut finish time: ", time.time() - main_task_start_time,
flush=True)
if not sent_exit_commands:
for idx in range(num_cpus):
worker_sockets[idx].send(b"exit")
for p in processes:
p.join(None) #block till join
############################################################################
# DONE
############################################################################
    return valid_index.astype(bool), monte_index.astype(bool)
def _oob_cut_worker(worker_idx, index_start, config):
SOCKET_PATH = config["SOCKET_PATH"]
batch_size = config["batch_size"]
mask = config["mask"]
mask_resolution = config["mask_resolution"]
min_dist = config["min_dist"]
max_dist = config["max_dist"]
cut_pct = config["cut_pct"]
XYZR_BUFFER_PATH = config["XYZR_BUFFER_PATH"]
xyzr_fd = config["xyzr_fd"]
num_holes = config["num_holes"]
VALID_IDX_BUFFER_PATH = config["VALID_IDX_BUFFER_PATH"]
valid_idx_fd = config["valid_idx_fd"]
MONTE_IDX_BUFFER_PATH = config["MONTE_IDX_BUFFER_PATH"]
monte_idx_fd = config["monte_idx_fd"]
UNIT_SHELL_BUFFER_PATH = config["UNIT_SHELL_BUFFER_PATH"]
unit_sphere_fd = config["unit_sphere_fd"]
num_surf_pts = config["num_surf_pts"]
MESH_PTS_BUFFER_PATH = config["MESH_PTS_BUFFER_PATH"]
mesh_pts_fd = config["mesh_pts_fd"]
num_mesh_points = config["num_mesh_points"]
MESH_RADII_BUFFER_PATH = config["MESH_RADII_BUFFER_PATH"]
mesh_radii_fd = config["mesh_radii_fd"]
############################################################################
# Open a UNIX-domain socket for communication to the master process. We set
# the timeout to be 10.0 seconds, so this worker will try notifying the
# master that it has results for up to 10.0 seconds, then it will loop again
# and check for input from the master, and if necessary wait and try to push
# results for 10 seconds again. Right now the workers only exit after a
# b'exit' message has been received from the master.
############################################################################
worker_socket = socket.socket(socket.AF_UNIX)
worker_socket.settimeout(10.0)
connect_start = time.time()
try:
worker_socket.connect(SOCKET_PATH)
except Exception as E:
print("WORKER", worker_idx, "UNABLE TO CONNECT, EXITING", flush=True)
raise E
############################################################################
#
############################################################################
xyzr_buffer_length = num_holes*4*8 # 4 for xyzr and 8 for float64
xyzr_mmap_buffer = mmap.mmap(xyzr_fd, xyzr_buffer_length)
x_y_z_r_array = np.frombuffer(xyzr_mmap_buffer, dtype=np.float64)
x_y_z_r_array.shape = (num_holes, 4)
############################################################################
#
############################################################################
valid_idx_buffer_length = num_holes*1 # uint8
valid_idx_mmap_buffer = mmap.mmap(valid_idx_fd, valid_idx_buffer_length)
valid_index = np.frombuffer(valid_idx_mmap_buffer, dtype=np.uint8)
valid_index.shape = (num_holes,)
############################################################################
#
############################################################################
monte_idx_buffer_length = num_holes*1 # uint8
monte_idx_mmap_buffer = mmap.mmap(monte_idx_fd, monte_idx_buffer_length)
monte_index = np.frombuffer(monte_idx_mmap_buffer, dtype=np.uint8)
monte_index.shape = (num_holes,)
############################################################################
#
############################################################################
unit_shell_buffer_length = num_surf_pts*3*8 # n by 3 by float64
unit_shell_mmap_buffer = mmap.mmap(unit_sphere_fd, unit_shell_buffer_length)
unit_sphere_pts = np.frombuffer(unit_shell_mmap_buffer, dtype=np.float64)
unit_sphere_pts.shape = (num_surf_pts,3)
############################################################################
#
############################################################################
mesh_pts_buffer_length = num_mesh_points*3*8 # n by 3 by float64
mesh_pts_mmap_buffer = mmap.mmap(mesh_pts_fd, mesh_pts_buffer_length)
mesh_points = np.frombuffer(mesh_pts_mmap_buffer, dtype=np.float64)
mesh_points.shape = (num_mesh_points,3)
############################################################################
#
############################################################################
mesh_radii_buffer_length = num_mesh_points*8 # n by float64
mesh_radii_mmap_buffer = mmap.mmap(mesh_radii_fd, mesh_radii_buffer_length)
mesh_points_radii = np.frombuffer(mesh_radii_mmap_buffer, dtype=np.float64)
mesh_points_radii.shape = (num_mesh_points,)
############################################################################
# Main Loop for the worker process begins here.
#
    # exit_process - flag for reading an exit command off the queue
    #
    # do_work - whether this worker should grab another batch on this iteration
    # sync/sent_sync_ack - checkpoint handshake state with the master process
    # have_result_to_write - a finished batch is waiting to be reported
    # no_cells_left_to_process - the cell index generator has been exhausted
    # sent_deactivation - we have already told the master this worker is done
    #
# If this worker process has reached the end of the Cell ID generator, we
# want to tell the master process we're done working, and wait for an exit
# command, so increase the select_timeout from 0 (instant) to 2.0 seconds to
# allow the operating system to wake us up during that 2.0 second interval
# and avoid using unnecessary CPU
############################################################################
received_exit_command = False
exit_process = False
return_array = np.empty((batch_size, 4), dtype=np.float64)
i_j_k_array = np.empty((batch_size, 3), dtype=np.int64)
worker_sockets = [worker_socket]
empty1 = []
empty2 = []
message_buffer = b""
do_work = True
sync = False
sent_sync_ack = False
have_result_to_write = False
no_cells_left_to_process = False
sent_deactivation = False
select_timeout = 0
while not exit_process:
#total_loops += 1
########################################################################
# As the first part of the main loop, use the select() method to check
# for any messages from the master process. It may send us an "exit"
# command, to tell us to terminate, a "sync" command, to tell us to stop
# processing momentarily while it writes out a save checkpoint, or a
# "resume" command to tell us that we may continue processing after a
# "sync"
########################################################################
#print("Worker "+str(worker_idx)+" "+str(message_buffer), flush=True)
read_socks, empty3, empty4 = select.select(worker_sockets, empty1, empty2, select_timeout)
if read_socks:
message_buffer += worker_socket.recv(1024)
if len(message_buffer) > 0:
if len(message_buffer) >= 4 and message_buffer[0:4] == b'exit':
exit_process = True
received_exit_command = True
continue
elif len(message_buffer) >= 4 and message_buffer[0:4] == b"sync":
sync = True
message_buffer = message_buffer[4:]
elif len(message_buffer) >= 6 and message_buffer[0:6] == b"resume":
sync = False
sent_sync_ack = False
message_buffer = message_buffer[6:]
########################################################################
# Here we do the main work of VoidFinder. We synchronize the work with
# the other worker processes using 2 lock-protected values, 'ijk_start'
# and 'write_start'. ijk_start gives us the starting cell_ID index to
# generate the next batch of cell ID's at, and write_start gives us the
# index to write our batch of results at. Note that we will process AT
# MOST 'batch_size' indexes per loop, because we use the Galaxy Map to
# filter out cell IDs which do not need to be checked (since they have
# galaxies in them they are non-empty and we won't find a hole there).
# Since we may process LESS than batch_size locations, when we update
# 'write_start' we update it with the actual number of cells which we
# have worked in our current batch.
#
# Note if we're in 'sync' mode, we don't want to do any work since the
# master process is making a checkpoint file.
########################################################################
if do_work and not sync:
####################################################################
# Get the next index of the starting cell ID to process for our
# current batch
####################################################################
index_start.acquire()
start_idx = index_start.value
index_start.value += batch_size
index_start.release()
####################################################################
# Setup the work
####################################################################
if start_idx + batch_size <= num_holes:
num_cells_to_process = batch_size
elif start_idx + batch_size > num_holes:
num_cells_to_process = max(0, num_holes - start_idx)
if num_cells_to_process > 0:
end_idx = start_idx + num_cells_to_process
_check_holes_mask_overlap(x_y_z_r_array[start_idx:end_idx],
#_check_holes_mask_overlap_2(x_y_z_r_array[start_idx:end_idx],
mask,
mask_resolution,
min_dist,
max_dist,
unit_sphere_pts,
mesh_points,
mesh_points_radii,
cut_pct,
valid_index[start_idx:end_idx],
monte_index[start_idx:end_idx])
have_result_to_write = True
####################################################################
# If the cell_ID_generator ever returns '0', that means we've
# reached the end of the whole search grid, so this worker can
# notify the master that it is done working
####################################################################
else:
no_cells_left_to_process = True
########################################################################
# Update the master process that we have processed some number of cells,
# using our socket connection. Note the actual results get written
# directly to the shared memmap, but the socket just updates the master
# with the number of new results (an integer)
########################################################################
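        # For reference (based on the packing below): a status message is a 1-byte
        # count of 8-byte fields followed by the fields themselves, e.g.
        #   struct.pack("b", 2) + struct.pack("=q", 0) + struct.pack("=q", 37)
        # is a 17-byte message meaning "type 0: 37 more cells processed"
        # (37 is just an illustrative value).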
if have_result_to_write:
#n_hole = np.sum(np.logical_not(np.isnan(return_array[:,0])), axis=None, dtype=np.int64)
out_msg = b""
out_msg += struct.pack("b", 2) #1 byte - number of 8 byte fields
out_msg += struct.pack("=q", 0) #8 byte field - message type 0
out_msg += struct.pack("=q", num_cells_to_process) #8 byte field - payload for num-write
try:
worker_socket.send(out_msg)
except:
do_work = False
else:
do_work = True
have_result_to_write = False
########################################################################
# If we're done working (cell ID generator reached the end/returned 0),
# notify the master process that this worker is going into a "wait for
# exit" state where we just sleep and check the input socket for the
# b'exit' message
#########################################################################
if no_cells_left_to_process:
if not sent_deactivation:
out_msg = b""
out_msg += struct.pack("b", 1) #1 byte - number of 8 byte fields
out_msg += struct.pack("=q", 1) #8 byte field - message type 1 (no payload)
worker_socket.send(out_msg)
sent_deactivation = True
select_timeout = 2.0
########################################################################
# If the master process wants to save a checkpoint, it needs the workers
# to sync up. It sends a b'sync' message, and then it waits for all the
# workers to acknowledge that they have received the 'sync', so here we
# send that acknowledgement. After we've received the sync, we just
# want to sleep and check the socket for a b'resume' message.
########################################################################
if sync:
if not sent_sync_ack:
acknowledge_sync = b""
acknowledge_sync += struct.pack("b", 1) #1 byte - number of 8 byte fields
acknowledge_sync += struct.pack("=q", 2) #8 byte field - message type 2
try:
worker_socket.send(acknowledge_sync)
except:
pass
else:
sent_sync_ack = True
else:
time.sleep(1.0)
############################################################################
# We're all done! Close the socket and any other resources, and finally
# return.
############################################################################
worker_socket.close()
print("WORKER EXITING GRACEFULLY "+str(worker_idx), flush=True)
return None
| 2.28125 | 2 |
vespa/analysis/functors/funct_fidsum_coil_combine.py | vespa-mrs/vespa | 0 | 12788462 | <reponame>vespa-mrs/vespa
# Python imports
# 3rd party imports
import numpy as np
# Vespa imports
from vespa.analysis.algos.suspect_channel_combination import whiten, svd_weighting, combine_channels
COILCOMBINE_MENU_ITEMS = ['Siemens',
'CMRR',
'CMRR-Sequential',
'CMRR-Hybrid',
'SVD (suspect)',
'External Dataset',
'External Dataset with Offset']
def siemens(raw):
"""
Combine code from IceSpectroEdit program, a la <NAME>
Given a list of scans and the extracted parameter dictionary we process
the data in as similar a way to Siemens ICE program IceSpectroEdit as
we can. My way of removing oversampling seems to not match 100% but the
other steps are in the order and perform very similarly to the ICE program.
Note. Since this got moved out of the Import step, we are assuming that
the following processing steps have already been applied:
1. oversampling removal
2. global scaling to reasonable numerical range
3. (optional) complex conjugate for proper display
Input data is array with (1, ncoils, nfids, npts) dimensions
Output data array has (1, 1, nfids, npts) dimension
Output ndarray collapses ncoils dimension by combining each group of ncoils
FIDs into a weighted/phased summed FID.
Other Output:
all_weight ndarray[nfids,ncoils], float, weights calculated/used to combine
each group of ncoil FIDs into a single FID
all_phases ndarray[nfids,ncoils], complex, phases calculated/used to combine
each group of ncoil FIDs into a single FID
"""
nrep, ncoil, nfid, npts = raw.shape
dat_comb = np.zeros([nfid, npts], dtype=np.complex)
weight = np.zeros([nfid, ncoil], dtype=np.float)
phases = np.zeros([nfid, ncoil], dtype=np.complex)
flag_norm_to_sum = False # default for now
# Parse each group of FIDs for N channels for each average as separate
# from all other scans. Perform the following steps:
# - accumulate weighting factors and phases, all channels
# - collate all channels as numpy arrays
# - calc final weights for each channel
# - apply weight and phase corrections to all channels
for i in range(nfid):
chans = []
for j in range(ncoil):
chan = raw[0, j, i, :].copy()
weight[i,j] = np.abs(chan[0])
phases[i,j] = np.conjugate(chan[0]) / weight[i,j] # normalized complex conj to cancel phase
chans.append(chan)
# normalize weighting function based on spectro data
tmp = np.sum([val * val for val in weight[i]]) # sum squared values
if tmp == 0.0: tmp = 1.0
if flag_norm_to_sum:
lamda = np.sum(weight) / tmp # sum of sensitivities
else:
lamda = 1.0 / np.sqrt(tmp) # sqrt of sum of squared sensitivities
weight[i] = [wt * lamda for wt in weight[i]]
# apply weighting and phase corrections and sum
for j, chan in enumerate(chans):
dat_comb[i, :] += chan * weight[i,j] * phases[i,j]
print_combine_stats(weight, phases, method='Siemens')
return normalize_shape(dat_comb), weight, phases
def cmrr_standard(raw, delta=0.0):
"""
Derived from Matlab code from <NAME> and <NAME>, UMinn.
Coil weights and phases are calculated once from the first scan,
then these values are applied globally to all subsequent scans.
Input data is array with (1, ncoil, nfid, npts) dimensions
Output data array has (1, 1, nfid, npts) dimension
delta should be in radians (bjs - ??)
Output array collapses ncoil dimension, combining ncoil FIDs into
    single weighted/phased summed FID.
Other Output:
all_weight ndarray[nfid,ncoil], float, weights calculated/used to combine
each group of ncoil FIDs into a single FID
all_phases ndarray[nfid,ncoil], complex, phases calculated/used to combine
each group of ncoil FIDs into a single FID
"""
nrep, ncoil, nfid, npts = raw.shape
xaxis = list(range(npts))
flag_norm_to_sum = False # default for now
weight = np.zeros([ncoil,], dtype=np.float)
phases = np.zeros([ncoil,], dtype=np.complex)
# --------------------------------------------------------------------------
# Calc weights and phases from first scan only
# - weight is amplitude of zero order polynomial coefficient using 9th order
# polynomial fit of FID in time domain (based on Uzay's script)
for j in range(ncoil):
chan = raw[0, j, 0, :].copy()
weight[j] = np.abs(chan[0])
phases[j] = np.conjugate(chan[0]) / weight[j] # normalized complex conj to cancel phase
coeffs = np.polyfit(xaxis, np.abs(chan), 9)
weight[j] = coeffs[-1] # last entry is amplitude - zero order coeff
# --------------------------------------------------------------------------
# normalize weighting function based on spectro data
tmp = np.sum([val * val for val in weight]) # sum squared values
if tmp == 0.0: tmp = 1.0
if flag_norm_to_sum:
lamda = np.sum(weight) / tmp # sum of sensitivities
else:
lamda = 1.0 / np.sqrt(tmp) # sqrt of sum of squared sensitivities
weight = np.array([val * lamda for val in weight])
phases = phases + delta
# --------------------------------------------------------------------------
# Apply weights and phases from first scan to all scans
all_weight = np.tile(weight, nfid)
all_phases = np.tile(phases, nfid)
all_weight.shape = (nfid, ncoil)
all_phases.shape = (nfid, ncoil)
dat_comb = raw.copy()
dat_comb *= weight.reshape(1, ncoil, 1, 1) * phases.reshape(1, ncoil, 1, 1)
dat_comb = dat_comb.sum(axis=(0,1))
print_combine_stats(all_weight, all_phases, method='CMRR')
return normalize_shape(dat_comb), all_weight, all_phases
def cmrr_sequential(raw):
"""
Combine method hybrid of Siemens and Gulin Oz CMRR
Derived from Matlab code from Dinesh. The coil weights and phases are
calculated from each scan in the scan list as in CMRR code, but are then
applied only to its own scan as in the Siemens code.
Input data is array with (1, ncoils, nfids, npts) dimensions
Output data array has (1, 1, nfids, npts) dimension
Output ndarray collapses ncoils dimension by combining each group of ncoils
FIDs into a weighted/phased summed FID.
Other Output:
all_weight ndarray[nfids,ncoils], float, weights calculated/used to combine
each group of ncoil FIDs into a single FID
all_phases ndarray[nfids,ncoils], complex, phases calculated/used to combine
each group of ncoil FIDs into a single FID
"""
nrep, ncoil, nfid, npts = raw.shape
dat_comb = np.ndarray([nfid,npts], dtype=np.complex)
all_weight = np.ndarray([nfid,ncoil], dtype=np.float)
all_phases = np.ndarray([nfid,ncoil], dtype=np.complex)
xaxis = list(range(npts))
flag_norm_to_sum = False # default for now
for i in range(nfid):
# determine weighting and phz for each coil
# zero-order phase correction
# correct for phase based on 1st point in 1st wref fid
# for each average, calc phase and weights to correct for coil geometry
chans = []
weight = []
phases = []
for j in range(ncoil):
chan = raw[0,j,i,:].copy()
magn = np.abs(chan[0])
phas = np.conjugate(chan[0])/magn # normalized complex conj to cancel phase
chan = phas * chan # Note. applying phase here NOT below as in Siemens
# amplitude of zero order phased fid in time domain
# using 9th order polynomial fit (based on Uzay's script)
coeffs = np.polyfit(xaxis, np.absolute(chan), 9)
weight.append(coeffs[-1]) # last entry is amplitude - zero order coeff
phases.append(phas)
chans.append(chan)
# normalize weighting function based on spectro data
tmp = np.sum([val*val for val in weight]) # sum squared values
if tmp == 0.0: tmp = 1.0
if flag_norm_to_sum:
lamda = np.sum(weight) / tmp # sum of sensitivities
else:
lamda = 1.0 / np.sqrt(tmp) # sqrt of sum of squared sensitivities
weight = [val*lamda for val in weight]
all_weight[i,:] = weight
all_phases[i,:] = phases
# apply weighting ... phase corrections done above
for j,chan in enumerate(chans):
chans[j] = chan * weight[j]
# sum corrected FIDs from each coil into one combined FID
dat_comb[i,:] = np.sum(chans, axis=0)
print_combine_stats(all_weight, all_phases, method='CMRR_Sequential')
return normalize_shape(dat_comb), all_weight, all_phases
def cmrr_hybrid(raw):
raw_combined1, weights1, phases1 = siemens(raw)
raw_combined2, weights2, phases2 = cmrr_standard(raw)
phases1 = np.angle(np.sum(phases1, axis=0), deg=True)
phases2 = np.angle(np.sum(phases2, axis=0), deg=True)
a = phases2 - phases1
vals = (a + 180) % 360 - 180
delta = np.mean(vals) * np.pi / 180.0
r = cmrr_standard(raw, delta=delta)
return r
def svd_suspect(raw):
"""
Based on Suspect (suspect.processing.channel_combination.svd_weighting)
- change weights normalization to -> sqrt of sum of squared sensitivities
to match up with other routines
- adapted the input/ouput requirements to work for Analysis
"""
nrep, ncoil, nfid, npts = raw.shape
data = np.squeeze(np.mean(raw.copy(), axis=2)) # [ncoil,npts] shape
u, s, v = np.linalg.svd(data, full_matrices=False)
# we truncate SVD to rank 1, v[0] is our FID -> use v[0,0] to phase signal
weights = u[:,0].conjugate()
phases = np.angle(v[0, 0])
norm = np.sqrt(np.sum(np.array([np.abs(item)*np.abs(item) for item in weights])))
norm_weights_phases = weights * np.exp(-1j * phases) / norm
# --------------------------------------------------------------------------
# Apply weights and phases from first scan to all scans
dat_comb = raw.copy()
dat_comb *= norm_weights_phases.reshape(ncoil, 1, 1)
dat_comb = dat_comb.sum(axis=(0,1))
all_weight = np.tile(np.abs(norm_weights_phases), nfid)
all_phases = np.tile(np.angle(norm_weights_phases), nfid)
all_weight.shape = (nfid, ncoil)
all_phases.shape = (nfid, ncoil)
return normalize_shape(dat_comb), all_weight, all_phases
def external_dataset(chain, delta=0.0):
"""
Coil combine method that uses values calculated in another dataset.
Input data is array with (1, ncoils, nfids, npts) dimensions
Output data array has (1, 1, nfids, npts) dimension
Output ndarray collapses ncoils dimension by combining each group of ncoils
FIDs into a weighted/phased summed FID.
Other Output:
all_weight ndarray[nfids,ncoils], float, weights calculated/used to combine
each group of ncoil FIDs into a single FID
all_phases ndarray[nfids,ncoils], complex, phases calculated/used to combine
each group of ncoil FIDs into a single FID
In some cases we want to use the coil_combine results from a single high
SNR water FID to combine multiple low SNR metabolite FIDs. In this case
we create weight and phase arrays that are nfids copies of the 1,ncoils
array of weight and phase values from the external data set.
"""
nrep, ncoil, nfid, npts = chain.raw.shape
dat_comb = np.ndarray([nfid,npts], dtype=np.complex)
all_weight = chain.weights.copy()
all_phases = chain.phases.copy() * np.exp(1j*delta)
# expand/copy weights/phases if nfids dimension differs from raw data here
if all_weight.shape[0] != nfid:
all_weight = np.tile(all_weight[0], nfid)
all_weight.shape = (nfid, ncoil)
if all_phases.shape[0] != nfid:
all_phases = np.tile(all_phases[0], nfid)
all_phases.shape = (nfid, ncoil)
for i in range(nfid):
# apply pre-calc phase and weights to correct for coil geometry
chans = []
for j in range(ncoil):
data = chain.raw[0,j,i,:].copy()
weights = all_weight[i,j]
phases = all_phases[i,j]
chans.append( data * weights * phases )
# sum corrected FIDs from each coil into one combined FID
dat_comb[i,:] = np.sum(chans, axis=0)
print_combine_stats(all_weight, all_phases, method='External Dataset')
return normalize_shape(dat_comb), all_weight, all_phases
def external_dataset_with_offset(chain):
raw_combined1, weights1, phases10 = external_dataset(chain)
raw_combined2, weights2, phases20 = siemens(chain.raw)
phases1 = np.angle(np.sum(phases10, axis=0), deg=True)
phases2 = np.angle(np.sum(phases20, axis=0), deg=True)
    # find the minimum angle between the two methods, taking into account
    # lead/lag and whether the angles span the wrap-around at 0/360 degrees.
a = phases1 - phases2
vals = (a + 180) % 360 - 180
# get rid of outliers if we have enough coils to do so
# - this keeps a few wrong numbers from skewing the mean offset
ncoil = len(vals)
if ncoil >= 32:
nout = 4
elif ncoil >= 8:
nout = 2
else:
nout = 0
if nout:
cmean = np.mean(vals)
cdiff = np.abs(cmean - vals)
for n in range(nout):
indx = np.argmax(cdiff)
cdiff = np.delete(cdiff, indx)
vals = np.delete(vals, indx)
# then we take the mean value and use it as the overall offset
delta = np.mean(vals)
delta = delta * np.pi / 180.0
r = external_dataset(chain, delta=-delta)
return r
def coil_combine_none(chain):
""" here we just copy data from first coil channel """
dat_comb = chain.raw[0,0,:,:].copy()
return normalize_shape(dat_comb)
def normalize_shape(data):
"""returns 'data' with 4 dimensional shape array (x, ncoils, nfids, npts) """
while len(data.shape) < 4:
data.shape = [1, ] + list(data.shape)
return data
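# Illustrative example (a sketch, not part of the original module): normalize_shape
# pads the leading dimensions so downstream code can always index
# (x, ncoils, nfids, npts):
#
#   >>> import numpy as np
#   >>> normalize_shape(np.zeros((16, 2048))).shape
#   (1, 1, 16, 2048)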
#------------------------------------------------------------------------------
# Helper Functions
def print_combine_stats(all_weight, all_phases, method=''):
#pass
return
# take mean values for all fids on one coil
mean_weight = np.mean(all_weight, axis=0)
mean_phases = np.angle(np.mean(all_phases, axis=0), deg=True)
stdv_weight = np.std(all_weight, axis=0)
stdv_phases = np.std(np.angle(all_phases, deg=True), axis=0)
# print '\n'
# print method+" Weights mean = \n", mean_weight
# print method+" Weights ratios = \n", ["{0:0.2f}".format(i/max(abs(mean_weight))) for i in mean_weight]
# print method+" Weights % stdv = ", 100.0*stdv_weight/mean_weight
# print method+" Phases [deg] mean = \n", mean_phases
# print method+" Phases [deg] mean = \n", ["{0:0.2f}".format(i) for i in mean_phases]
# print method+" Phases % stdv = ", 100.0*stdv_phases/mean_phases
'''
function varargout = weightsmod(varargin)
% function varargout = weightsmod(varargin)
% determine and apply weighting Factor for each coil
% method = 1, determine weighting and phz for each coil
% method = 2, apply weights and phz to data
%
% <NAME>, 18 March 2013
if (nargin < 2)
method = 1;
fidw = varargin{1};
elseif (nargin == 3)
method = 2;
fiduse = varargin{1};
wk = varargin{2};
refphz = varargin{3};
else
errorbox('Number of input parameters is not valid in weightsmod','warn')
return
end
if (method == 1)
%% determine weighting and phz for each coil
% zero-order phase correction
% correct for phase based on 1st point in 1st wref fid
[np,nbCoils] = size(fidw);
fidwphz = complex(zeros(size(fidw)));
refphz = zeros(nbCoils,1);
for iy = 1:nbCoils
refphz(iy) = angle(fidw(1,iy));
fidwphz(:,iy) = phase_adjust(fidw(:,iy),refphz(iy));
end
% weighting factor
fidecc = squeeze(fidwphz(:,:));
% amplitude of fid in time domain
% using 9th order polynomial fit (based on Uzay's script)
warning off MATLAB:polyfit:RepeatedPointsOrRescale
pts =(1:np)';
amplfid = zeros(nbCoils,1);
for ical = 1:nbCoils
wktemp = polyfit(pts,abs(double(fidecc(:,ical))),9);
amplfid(ical) = wktemp(end); % last entry is amplitude
% show fit
%f=polyval(wktemp,pts);
%figure, plot(abs(double(fidecc(:,ical))));
%hold on, plot(f,'r');
end
% weighting function
wk = amplfid/sqrt(sum(amplfid.^2));
% return variables
varargout{1} = wk;
varargout{2} = refphz;
elseif (method == 2)
%% apply weights and phz to data
if (ndims(fiduse) == 2) %2D size
fiduse_phz = complex(zeros(size(fiduse)));
nbCoils = size(fiduse,3);
% zero-order phase correction
for iy = 1:nbCoils
fiduse_phz(:,iy) = phase_adjust(fiduse(:,iy),refphz(iy));
end
% apply weightings
fidweighted = complex(zeros(size(fiduse_phz)));
for ical=1:length(wk)
fidweighted(:,ical) = wk(ical).*fiduse_phz(:,ical);
end
%sum all channels
sumfidweighted = sum(fidweighted,2);
elseif (ndims(fiduse) == 3) %3D size
fiduse_phz = complex(zeros(size(fiduse)));
nbCoils = size(fiduse,3);
nt = size(fiduse,2);
% zero-order phase correction
for iy = 1:nbCoils
for ix = 1:nt
fiduse_phz(:,ix,iy) = phase_adjust(fiduse(:,ix,iy),refphz(iy));
end
end
% apply weightings
fidweighted = complex(zeros(size(fiduse_phz)));
for ical=1:length(wk)
fidweighted(:,:,ical) = wk(ical).*fiduse_phz(:,:,ical);
end
%sum all channels
sumfidweighted = sum(fidweighted,3);
end
% return variables
varargout{1} = sumfidweighted ;
end
return
function outfid = phase_adjust(trc, phase)
% function to apply phase angles to complex data, e.g. to use
% variable receiver phases in VNMR
rtrc = real(trc);
itrc = imag(trc);
cosp = cos(phase); % real_cor
sinp = sin(phase); % imag_cor
rout = rtrc.*cosp + itrc.*sinp;
iout = itrc.*cosp - rtrc.*sinp;
outfid = complex(rout, iout);
return
'''
| 2.359375 | 2 |
ipx800/__init__.py | marcaurele/py-ipx800 | 0 | 12788463 | <reponame>marcaurele/py-ipx800<filename>ipx800/__init__.py
"""Module to control the IPX800 V4 device from GCE Electronics."""
from ipx800.ipx800 import ApiError, IPX800 as ipx800 # noqa
__version__ = "0.6.dev0"
| 1.578125 | 2 |
apps/salt/salt_api.py | cc0411/oms | 0 | 12788464 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.conf import settings
import time
import copy
import requests
import logging
# Create your views here.
logger = logging.getLogger('devoms.views')
# Wrapper class for calling the salt-api service
class SaltAPI(object):
    # Default values are set for the parameters used to obtain the token, so if we
    # ever need to call a different salt-api instance, passing in the matching
    # arguments works just the same.
    # (A session argument was originally meant to be supplied via "with ... as" so
    # that resources are reclaimed when the block finishes; otherwise the long-lived
    # Session keeps holding a connection, which might cause problems later.)
def __init__(self, apiurl=settings.SALT_API_URL, username=settings.SALT_API_NAME, password=settings.SALT_API_PWD,
eauth='<PASSWORD>'):
self.url = apiurl
self.username = username
self.password = password
self.eauth = eauth
self.headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
self.__base_data = dict(
username=self.username,
password=<PASSWORD>,
eauth='<PASSWORD>'
)
self.__token = self.get_token()
    # Obtain an auth token from the salt-api /login endpoint
def get_token(self):
params = copy.deepcopy(self.__base_data)
requests.packages.urllib3.disable_warnings()
        # Initial request to fetch the API token
ret = requests.post(self.url + '/login', verify=False, headers=self.headers, json=params, timeout=30)
ret_json = ret.json()
token = ret_json["return"][0]["token"]
return token
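    # The /login response is expected to look roughly like
    #   {"return": [{"token": "<token>", ...}]}
    # which is why get_token() reads ret_json["return"][0]["token"] above.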
    # Start with the most generic method: nothing in the payload is fixed here, the caller defines it and passes it in; the benefit is that any salt-api operation is supported and the method can also be used on its own
def public(self, message='public', **kwargs):
headers_token = {'X-Auth-Token': self.__token}
headers_token.update(self.headers)
requests.packages.urllib3.disable_warnings()
ret = requests.post(url=self.url, verify=False, headers=headers_token, **kwargs)
ret_code, ret_data = ret.status_code, ret.json()
return ret_data
    # Wraps test.ping; by default runs salt '*' test.ping
def test_api(self, tgt='*'):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'test.ping'}
message = 'test_api'
return self.public(message, json=params)
    # Wraps cmd.run; just pass in tgt and arg (and optionally tgt_type)
def cmd_run_api(self, tgt, arg):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'cmd.run', 'arg': arg}
message = 'cmd_run_api'
return self.public(message, json=params)
    # Wraps asynchronous cmd.run; just pass in tgt and arg (and optionally tgt_type)
def async_cmd_run_api(self, tgt, arg):
params = {'client': 'local_async', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'cmd.run', 'arg': arg}
message = 'async_cmd_run_api'
return self.public(message, json=params)
    # Wraps state.sls; just pass in tgt and arg (and optionally tgt_type)
def state_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'state.sls', 'arg': arg}
message = 'state_api'
return self.public(message, json=params)
    # Install a minion over salt-ssh; meant to be used together with minion_install.sls
def install_minion_api(self, tgt, arg=None):
params = {'client': 'ssh', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'state.sls', 'arg': arg}
message = 'install_minion_api'
return self.public(message, json=params)
    # Wraps asynchronous state.sls; pass in tgt and arg (and optionally tgt_type); the result is a jid
def async_state_api(self, tgt, arg=None):
params = {'client': 'local_async', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'state.sls', 'arg': arg}
message = 'async_state_api'
return self.public(message, json=params)
    # Query a job's execution state by jid for follow-up handling; [{}] means it finished, returned data means it is still running
def job_active_api(self, tgt, arg, ):
params = {'client': 'local', 'fun': 'saltutil.find_job', 'tgt_type': 'glob', 'tgt': tgt, 'arg': arg}
message = 'job_active_api'
return self.public(message, json=params)
def grains_append_api(self,tgt='*' ):
params = {'client': 'local', 'tgt': tgt,'fun': 'saltutil.sync_grains', 'tgt_type': 'glob'}
message = 'grains_append_api'
return self.public(message, json=params)
    # Query whether a jid exited successfully; just pass in the jid. True means it finished and exited successfully, False means it failed or has not finished yet
def job_exit_success_api(self, jid):
params = {'client': 'runner', 'fun': 'jobs.exit_success', 'jid': jid}
message = 'job_exit_success_api'
return self.public(message, json=params)
    # Look up the result of a jid; just pass in the jid
def jid_api(self, jid):
params = {'client': 'runner', 'fun': 'jobs.lookup_jid', 'jid': jid}
message = 'jid_api'
return self.public(message, json=params)
    # Wraps archive.zip; just pass in tgt and arg (and optionally tgt_type)
def archive_zip_api(self, tgt, arg):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'archive.zip', 'arg': arg}
message = 'archive_zip_api'
return self.public(message, json=params)
    # Wraps archive.tar; just pass in tgt and arg (and optionally tgt_type)
def archive_tar_api(self, tgt, arg):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'archive.tar', 'arg': arg}
message = 'archive_tar_api'
return self.public(message, json=params)
    # Wraps cp.get_file; just pass in tgt and arg (and optionally tgt_type)
def cp_get_file_api(self, tgt, arg):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'cp.get_file', 'arg': arg}
message = 'cp_get_file_api'
return self.public(message, json=params)
    # Wraps cp.get_dir; just pass in tgt and arg (and optionally tgt_type)
def cp_get_dir_api(self, tgt, arg):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'cp.get_dir', 'arg': arg}
message = 'cp_get_dir_api'
return self.public(message, json=params)
    # Wraps salt-key -L
def saltkey_listall_api(self):
params = {'client': 'wheel', 'fun': 'key.list_all'}
message = 'saltkey_listall_api'
return self.public(message, json=params)
def saltkey_delete_api(self, tgt):
params = {'client': 'wheel', 'fun': 'key.delete', 'match': tgt}
message = 'saltkey_delete_api'
return self.public(message, json=params)
    # Method to accept a salt-key; note that include_rejected and include_denied have no effect even when set to True (found through testing)!!
def saltkey_accept_api(self, tgt):
parmas = {'client': 'wheel', 'fun': 'key.accept', 'match': tgt}
message = 'saltkey_accept_api'
return self.public(message, json=parmas)
    # Method to reject a salt-key; annoyingly, include_accepted and include_denied have no effect even when set to True (found through testing)!!
def saltkey_reject_api(self, tgt):
parmas = {'client': 'wheel', 'fun': 'key.reject', 'match': tgt}
message = 'saltkey_reject_api'
return self.public(message, json=parmas)
    # salt-run manage.status: check which minions are online/offline; fairly slow but bug-free, unlike salt-run manage.alived
def saltrun_manage_status_api(self, arg=None):
params = {'client': 'runner', 'fun': 'manage.status', 'arg': arg}
message = 'saltrun_manage_status_api'
return self.public(message, json=params)
    # salt-run manage.alived: list online minions; very fast and convenient but buggy (can take show_ipv4=True to also return the IP used to communicate with the master, default False)
def saltrun_manage_alive_api(self, arg=None):
params = {'client': 'runner', 'fun': 'manage.alived', 'arg': arg}
message = 'saltrun_manage_alive_api'
return self.public(message, json=params)
    # salt-run manage.not_alived: list offline minions; very fast and convenient but buggy
def saltrun_manage_notalive_api(self, arg=None):
params = {'client': 'runner', 'fun': 'manage.not_alived', 'arg': arg}
message = 'saltrun_manage_notalive_api'
return self.public(message, json=params)
    # Wraps grains.items; just pass in tgt (and optionally tgt_type)
def grains_items_api(self, tgt, tgt_type='glob'):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': tgt_type, 'fun': 'grains.items'}
message = 'grains_items_api'
return self.public(message, json=params)
    # Wraps grains.item; just pass in tgt and arg (and optionally tgt_type)
def grains_item_api(self, tgt, tgt_type='glob', arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': tgt_type, 'fun': 'grains.item', 'arg': arg}
message = 'grains_item_api'
return self.public(message, json=params)
    # Wraps service.available to check whether a service exists (True or False); just pass in tgt and arg (and optionally tgt_type)
def service_available_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'service.available', 'arg': arg}
message = 'service_available_api'
return self.public(message, json=params)
    # Wraps service.status to check whether a service is running; just pass in tgt and arg (and optionally tgt_type)
def service_status_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'service.status', 'arg': arg}
message = 'service_status_api'
return self.public(message, json=params)
    # Wraps service.start to start a system service; works on both Windows and Linux
def service_start_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'service.start', 'arg': arg}
message = 'service_start_api'
return self.public(message, json=params)
    # Wraps service.stop to stop a system service, Windows and Linux: salt '*' service.stop <service name>. The stop can
    # succeed yet return a pile of error messages, so it is best to finish with a service.status call: True means running, False means stopped
def service_stop_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'service.stop', 'arg': arg}
message = 'service_stop_api'
return self.public(message, json=params)
    # Wraps ps.pgrep to look up process IDs by name (fuzzy match); works on both Windows and Linux
def ps_pgrep_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'ps.pgrep', 'arg': arg}
message = 'ps_pgrep_api'
return self.public(message, json=params)
    # Wraps ps.proc_info to get detailed information for a given pid, e.g.
# {'client':'local', 'tgt':'id','fun':'ps.proc_info', 'arg':['pid=123','attrs=["cmdline","pid","name","status"]']}
def ps_proc_info_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'ps.proc_info', 'arg': arg}
message = 'ps_proc_info_api'
return self.public(message, json=params)
    # Wraps ps.kill_pid to kill a process, e.g. {'client':'local','fun':'ps.kill_pid','tgt':'192.168.68.1', 'arg':['pid=11932']}
def ps_kill_pid_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'ps.kill_pid', 'arg': arg}
message = 'ps_kill_pid_api'
return self.public(message, json=params)
    # Wraps task.create_task to create a Windows scheduled task, e.g. salt '192.168.68.1' task.create_task ooxx action_type=Execute
# cmd='"C:\ooxx\Shadowsocks.exe"' force=true execution_time_limit=False user_name=administrator
def task_create_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'task.create_task', 'arg': arg}
message = 'task_create_api'
return self.public(message, json=params)
    # Wraps task.run to start a Windows scheduled task, e.g. salt '192.168.100.171' task.run test1
    # Gotcha! The official docs give salt '192.168.100.171' task.list_run test1, which simply does not work!
def task_run_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'task.run', 'arg': arg}
message = 'task_run_api'
return self.public(message, json=params)
    # Wraps task.stop to stop a Windows scheduled task, e.g. salt '192.168.100.171' task.stop test1
def task_stop_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'task.stop', 'arg': arg}
message = 'task_stop_api'
return self.public(message, json=params)
    # Wraps file.mkdir to create a directory; no trailing / is needed, unlike file.makedirs which wants the trailing /, otherwise it only creates up to the last / (which can also be used on purpose)
def file_mkdir_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.mkdir', 'arg': arg}
message = 'file_mkdir_api'
return self.public(message, json=params)
    # Wraps file.makedirs to create directories recursively (equivalent to mkdir -p); the path needs a trailing /, otherwise it only creates up to the last / (which can also be used on purpose)
def file_makedirs_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.makedirs', 'arg': arg}
message = 'file_makedirs_api'
return self.public(message, json=params)
    # Wraps file.file_exists to check whether a file exists
def file_exists_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.file_exists', 'arg': arg}
message = 'file_exists_api'
return self.public(message, json=params)
# file_write_api(tgt='salt1',arg=('/root/test.text','word'))
def file_write_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.append', 'arg': arg}
message = 'file_write_api'
return self.public(message, json=params)
    # Wraps file.remove to remove a file, deleting recursively if it is a directory
def file_remove_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.remove', 'arg': arg}
message = 'file_remove_api'
return self.public(message, json=params)
    # Wraps file.directory_exists to check whether a directory exists; returns True/False
def file_directory_exists_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.directory_exists', 'arg': arg}
message = 'file_directory_exists_api'
return self.public(message, json=params)
    # Wraps file.symlink to create a symbolic link
def file_symlink_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'file.symlink', 'arg': arg}
message = 'file_symlink_api'
return self.public(message, json=params)
    # Wraps supervisord.status to check the state of a supervised program name. Possible results:
    # 1. the command is missing - salt needs supervisor installed before it works: {'return': [{'192.168.100.171': "'supervisord.status' is not available."}]}
    # 2. no such program name: {'return': [{'192.168.68.50-master': {'1:': {'reason': '(no such process)', 'state': 'ERROR'}}}]}
    # 3. supervisor is installed but not running: {'return': [{'192.168.100.170': {'unix:///var/run/supervisor/supervisor.sock': {'reason'
    # : 'such file', 'state': 'no'}}}]}
    # 4. normal results:
    # running: {'return': [{'192.168.68.50-master': {'djangoproject.runserver': {'state': 'RUNNING', 'reason': 'pid 1233,
    # uptime 1 day, 6:56:14'}}}]}
    # stopped: {'return': [{'192.168.100.170': {'test': {'state': 'STOPPED', 'reason': 'Dec 13 05:23 PM'}}}]}
def supervisord_status_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'supervisord.status', 'arg': arg}
message = 'supervisord_status_api'
return self.public(message, json=params)
    # Wraps supervisord.stop to stop a supervised program name. Besides the first three cases listed for status above, results can be:
    # 1. the program is already stopped: {'return': [{'192.168.100.170': 'test: ERROR (not running)'}]}
    # 2. stopped normally: {'return': [{'192.168.100.170': 'test: stopped'}]}
    # 3. no such program name: {'return': [{'192.168.100.170': 'test1: ERROR (no such process)'}]}
def supervisord_stop_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'supervisord.stop', 'arg': arg}
message = 'supervisord_stop_api'
return self.public(message, json=params)
    # Wraps supervisord.start to start a supervised program name. Possible results:
    # 1. started normally: {'return': [{'192.168.100.170': 'test: started'}]}
    # 2. already started: {'return': [{'192.168.100.170': 'test: ERROR (already started)'}]}
    # 3. no such program name: {'return': [{'192.168.100.170': 'test1: ERROR (no such process)'}]}
def supervisord_start_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'supervisord.start', 'arg': arg}
message = 'supervisord_start_api'
return self.public(message, json=params)
    # Reload the supervisord configuration; newly added programs will be started
def supervisord_update_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'supervisord.update', 'arg': arg}
message = 'supervisord_update_api'
return self.public(message, json=params)
    # Wraps the rsync.rsync synchronization command
def rsync_rsync_api(self, tgt, arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'rsync.rsync', 'arg': arg}
message = 'rsync_rsync_api'
return self.public(message, json=params)
    # Wraps the asynchronous rsync.rsync synchronization command
def async_rsync_rsync_api(self, tgt, arg=None):
params = {'client': 'local_async', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'rsync.rsync', 'arg': arg}
message = 'async_rsync_rsync_api'
return self.public(message, json=params)
    # Wraps sys.doc to query execution-module documentation
def sys_doc_api(self, tgt='*', arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'sys.doc', 'arg': arg}
message = 'sys_doc_api'
return self.public(message, json=params)
    # Wraps sys.runner_doc to query runner-module documentation
def sys_runner_doc_api(self, tgt='*', arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'sys.runner_doc', 'arg': arg}
message = 'sys_runner_doc_api'
return self.public(message, json=params)
    # Wraps sys.state_doc to query state-module documentation
def sys_state_doc_api(self, tgt='*', arg=None):
params = {'client': 'local', 'tgt': tgt, 'tgt_type': 'glob', 'fun': 'sys.state_doc', 'arg': arg}
message = 'sys_state_doc_api'
return self.public(message, json=params)
| 2.0625 | 2 |
choir/modeling/meta_arch/__init__.py | scwangdyd/large_vocabulary_hoi_detection | 9 | 12788465 | # -*- coding: utf-8 -*-
from .build import META_ARCH_REGISTRY, build_model
from .hoi_detector import HOIR
from .cascade_hoi_detector import CHOIR | 1 | 1 |
students/k3342/practical_works/Kocheshkova_Kseniia/django_project_kocheshkova/project_first_app/admin.py | Derimeer/ITMO_ICT_WebProgramming_2020 | 0 | 12788466 | from django.contrib import admin
from .models import Owner
admin.site.register(Owner)
from .models import Car
admin.site.register(Car)
from .models import Ownership
admin.site.register(Ownership)
from .models import License
admin.site.register(License)
| 1.375 | 1 |
codes_auto/1633.minimum-number-of-increments-on-subarrays-to-form-a-target-array.py | smartmark-pro/leetcode_record | 0 | 12788467 | <reponame>smartmark-pro/leetcode_record
#
# @lc app=leetcode.cn id=1633 lang=python3
#
# [1633] minimum-number-of-increments-on-subarrays-to-form-a-target-array
#
None
# @lc code=end | 1.1875 | 1 |
src/apis/scaffold/install.py | OMOBruce/epic-awesome-gamer | 0 | 12788468 | <filename>src/apis/scaffold/install.py<gh_stars>0
# -*- coding: utf-8 -*-
# Time : 2022/1/20 16:16
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
from webdriver_manager.chrome import ChromeDriverManager
from services.settings import DIR_MODEL, logger
from services.utils import YOLO, CoroutineSpeedup, ToolBox
def _download_model():
"""
    Download the YOLOv4 object-detection model
:return:
"""
logger.debug("下载 YOLOv4 目标检测模型...")
YOLO(dir_model=DIR_MODEL).download_model()
def _download_driver():
"""
    Download the browser driver.
:return:
"""
logger.debug("适配 ChromeDriver...")
ChromeDriverManager(version="latest").install()
class PerformanceReleaser(CoroutineSpeedup):
def __init__(self, docker, power=None):
super(PerformanceReleaser, self).__init__(docker=docker, power=power)
@logger.catch()
def control_driver(self, task, *args, **kwargs):
task()
def run():
"""
    Download all dependencies required to run the project.
:return:
"""
logger.debug(
ToolBox.runtime_report(
motive="BUILD",
action_name="ScaffoldInstaller",
message="正在下载系统依赖",
)
)
PerformanceReleaser(docker=[_download_driver, _download_model], power=3).go()
logger.success(
ToolBox.runtime_report(
motive="GET",
action_name="ScaffoldInstaller",
message="系统依赖下载完毕",
)
)
@logger.catch()
def test():
from services.utils import get_challenge_ctx
ctx = get_challenge_ctx(silence=True)
try:
ctx.get("https://www.baidu.com")
finally:
ctx.quit()
logger.success(
ToolBox.runtime_report(
motive="TEST",
action_name="ScaffoldInstaller",
message="驱动适配成功",
)
)
| 2.25 | 2 |
divdis/model.py | AlexandreAbraham/DivDis | 0 | 12788469 | <gh_stars>0
import torch
from torch import nn
class DivDis(nn.Module):
def __init__(self, backbone, n_output, n_heads, n_classes, lambda_mi=1., lambda_reg=1.):
super().__init__()
self.backbone = backbone
        # Register the heads as submodules via nn.ModuleList (a plain Python list
        # would be skipped by .to(device) / state_dict()).
        self.heads = nn.ModuleList(
            [nn.Sequential(nn.Linear(n_output, n_classes), nn.Softmax(dim=1)) for _ in range(n_heads)]
        )
self.active_head = None
def set_active_head(self, active_head):
if not 0 <= active_head < len(self.heads):
raise ValueError('Invalid_head')
self.active_head = active_head
def forward(self, x):
t = self.backbone(x)
if self.training:
# Training mode, predict with all heads
preds = [head(t) for head in self.heads]
return torch.stack(preds, dim=1)
else:
return self.heads[self.active_head](t)
def parameters(self):
for parameters in self.backbone.parameters():
yield parameters
for head in self.heads:
for parameters in head.parameters():
yield parameters | 2.453125 | 2 |
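# Minimal smoke test (a sketch, not part of the original training pipeline); the
# tiny backbone below is purely illustrative.
if __name__ == "__main__":
    backbone = nn.Sequential(nn.Flatten(), nn.Linear(8, 16), nn.ReLU())
    model = DivDis(backbone, n_output=16, n_heads=3, n_classes=4)
    x = torch.randn(5, 8)

    model.train()
    print(model(x).shape)        # (5, 3, 4): one softmax per head

    model.eval()
    model.set_active_head(1)
    print(model(x).shape)        # (5, 4): predictions from the chosen head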
scrapper/request.py | samedamci/searx-instance-scrapi | 0 | 12788470 | #!/usr/bin/env python3
from scrapper import INSTANCE_URL
from bs4 import BeautifulSoup
import requests
def get_html() -> BeautifulSoup:
"""Function makes GET request to instance and downloads raw HTML code
which is parsing after."""
html_doc = requests.get(f"{INSTANCE_URL}/preferences").content
html = BeautifulSoup(html_doc, "html.parser")
return html
| 3.203125 | 3 |
security/JWT.py | TusharMalakar/Core_CollabService | 0 | 12788471 | import datetime
import jwt
import os
from functools import wraps
from flask import request, Response
SECRET_KEY = "ThisIsAVeryBadAPISecretKeyThatIsOnlyUsedWhenRunningLocally"
if 'API_KEY' in os.environ: SECRET_KEY = os.environ['API_KEY']
# generates an encrypted auth token using the encrypted using the secret key valid for 24 hours
def encode_auth_token(userName):
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=1),
'iat': datetime.datetime.utcnow(),
'username': userName
}
return jwt.encode(
payload,
SECRET_KEY,
algorithm='HS256'
)
except Exception as e:
return e
# Decodes the auth token and returns 'SUCCESS' + username if the token is valid, otherwise an error message string
def decode_auth_token(auth_token):
# print(auth_token)
try:
payload = jwt.decode(auth_token, SECRET_KEY)
return 'SUCCESS' + payload['username']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
# Defines the @requires_auth decoration. Any endpoint with the decoration requires authentication
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth_token = False
if not auth_token:
auth_token = request.headers.get('capstoneAuth')
if not auth_token:
auth_token = request.headers.get('Authorization')
if not auth_token:
auth_token = request.cookies.get('capstoneAuth')
if not auth_token: # Authtoken no present so send 401
return Response('Missing Auth Token!\n' 'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
user_name = decode_auth_token(auth_token) # Get userid from authtoken
if user_name.startswith('SUCCESS'):
# set the userNameFromToken var so user can be identified form the request
request.userNameFromToken = user_name[7:]
# send control back to actual endpoint function
return f(*args, **kwargs)
else:
return Response('\n' 'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
return decorated
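# Small self-check (a sketch, not part of the service itself): round-trip a token
# through encode/decode using the locally configured SECRET_KEY. "alice" is just a
# placeholder username; this assumes the PyJWT 1.x API the module was written against.
if __name__ == "__main__":
    token = encode_auth_token("alice")
    print(decode_auth_token(token))  # expected: "SUCCESSalice"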
| 2.90625 | 3 |
Apps/WeatherApp/app.py | miyucode/MaxPyOS | 2 | 12788472 | from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
import tkinter.messagebox as mb
import sys, os, requests
def weatherapp():
def closeweatherapp():
file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
file.write("")
file.close()
weatherapp.destroy()
def getWeather(canvas):
def readintoweatherconditionfile():
file = open('Apps/WeatherApp/src/weather-condition.txt', 'r')
content = file.read()
if content == "Clear":
weatherapp.iconbitmap("Apps/WeatherApp/icons/sun-icon.ico")
elif content == "Rain":
weatherapp.iconbitmap("Apps/WeatherApp/icons/rain-icon.ico")
elif content == "Clouds":
weatherapp.iconbitmap("Apps/WeatherApp/icons/clouds-icon.ico")
elif content == "Stormy":
weatherapp.iconbitmap("Apps/WeatherApp/icons/stormyrain-icon.ico")
elif content == "Haze":
weatherapp.iconbitmap("Apps/WeatherApp/icons/windy-icon.ico")
elif content == "Mist":
weatherapp.iconbitmap("Apps/WeatherApp/icons/haze-icon.ico")
else:
pass
file.close()
city = namecity.get()
api = "https://api.openweathermap.org/data/2.5/weather?q="+city+"&appid=06c921750b9a82d8f5d1294e1586276f"
try:
weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
json_data = requests.get(api).json()
condition = json_data['weather'][0]['main']
file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
file.write(condition)
file.close()
temp = int(json_data['main']['temp'] - 273.15)
min_temp = int(json_data['main']['temp_min'] - 273.15)
max_temp = int(json_data['main']['temp_max'] - 273.15)
pressure = json_data['main']['pressure']
humidity = json_data['main']['humidity']
wind = json_data['wind']['speed']
final_info = condition + "\n" + str(temp) + "°C"
final_data = "\n" + "Minimal temperature: " + str(min_temp) + "°C" + "\n" + "Maximal temperature: " + str(max_temp) + "°C" + "\n" + "Humidity: " + str(humidity) + "\n"
readintoweatherconditionfile()
label1.config(text=final_info)
label2.config(text=final_data)
except:
weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
file = open('Apps/WeatherApp/src/weather-condition.txt', 'w')
file.write("")
file.close()
label1.config(text="This city doesn't exist !")
label2.config(text="")
weatherapp = Tk()
weatherapp.title("MaxPyOS - Weather App")
weatherapp.geometry("600x500")
weatherapp.resizable(False, False)
weatherapp.iconbitmap("Apps/WeatherApp/icons/weatherapp-icon.ico")
weatherapp.protocol("WM_DELETE_WINDOW", lambda: closeweatherapp())
f = ("poppins", 15, "bold")
t = ("poppins", 35, "bold")
namecity = Entry(weatherapp, justify='center', width=20, font=t)
namecity.pack(pady=20)
namecity.focus()
namecity.bind('<Return>', getWeather)
label1 = Label(weatherapp, font=t)
label1.pack()
label2 = Label(weatherapp, font=f)
label2.pack() | 3.125 | 3 |
mlflow/odahuflow/trainer/helpers/fs.py | odahu/odahuTrainer | 5 | 12788473 | import os
import shutil
def copytree(src, dst):
"""
Copy file tree from <src> location to <dst> location
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d)
else:
shutil.copy2(s, d)
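# Example usage (a sketch, not part of the trainer): mirror one directory tree into
# another, pre-created destination directory.
if __name__ == "__main__":
    import tempfile

    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()
    with open(os.path.join(src, "model.bin"), "w") as f:
        f.write("weights")

    copytree(src, dst)
    print(os.listdir(dst))  # expected: ['model.bin']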
| 3.359375 | 3 |
OrzMC/core/PaperAPI.py | OrzGeeker/OrzMC | 7 | 12788474 | # -*- coding: utf8 -*-
# papermc: https://papermc.io
# api v1: https://paper.readthedocs.io/en/latest/site/api.html
# api v2: https://papermc.io/api/docs/swagger-ui/index.html?configUrl=/api/openapi/swagger-config
import urllib.request
import urllib.parse
import json
class PaperAPI:
''' api documentation: https://paper.readthedocs.io/en/stable/site/api.html '''
API = 'https://papermc.io/api/%(API_VERSION)s/%(PROJECT_NAME)s/%(PROJECT_VERSION)s/%(BUILD_ID)s/download'
@classmethod
def downloadURLV1(cls, project_name = 'paper', project_version = None, build_id = 'latest'):
return PaperAPI.API % {
'API_VERSION': 'v1',
'PROJECT_NAME': project_name,
'PROJECT_VERSION': project_version,
'BUILD_ID': build_id
}
BASE_URL = 'https://papermc.io'
@classmethod
def downloadURLV2(cls, version):
url = 'https://papermc.io/api/'
url +='v2/'
url += 'projects/paper/'
url += 'versions/%s' % version
jsonResp = json.loads(urllib.request.urlopen(url).read().decode('utf-8'))
builds = jsonResp.get('builds')
latest_build = max(builds)
url += '/builds/%s' % latest_build
jsonResp = json.loads(urllib.request.urlopen(url).read().decode('utf-8'))
jar_name = jsonResp.get('downloads').get('application').get('name')
url += '/downloads/%s' % jar_name
return url
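    # Illustrative only (values are hypothetical; the real build id and jar name
    # come from the live API):
    #   PaperAPI.downloadURLV1(project_version='1.16.5')
    #     -> https://papermc.io/api/v1/paper/1.16.5/latest/download
    #   PaperAPI.downloadURLV2('1.16.5')
    #     -> https://papermc.io/api/v2/projects/paper/versions/1.16.5/builds/<latest>/downloads/<jar>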
| 2.109375 | 2 |
lib/BarSeqPy/data_prep1.py | OGalOz/barseqR | 0 | 12788475 | import os, logging, json, re
import pandas as pd
import numpy as np
from BarSeqPy.translate_R_to_pandas import *
def data_prep_1(data_dir, FEBA_dir, debug_bool=False, meta_ix=7, cfg=None):
""" The first phase of data preparation for the BarSeqR Computations
Args:
data_dir: (str) Path to directory which contains the
following files: 'all.poolcount', 'genes',
'exps', 'pool' - all TSV files.
Optionally contains the following files:
strainusage.barcodes.json - json list
strainusage.genes.json - json list
strainusage.genes12.json - json list
ignore_list.json - json list ( list of str
with sample-index name to ignore )
All these files are changed depending on the input.
FEBA_dir: (str) Path to directory which contains the
following files: 'desc_short_rules'
debug_bool: Whether you'd like to print the dataframes
as a test to the data_dir before running FEBA_Fit
meta_ix (int): The number of meta column indeces in all.poolcount
cfg (python dict): The default and config variables required:
drop_exps (bool): Do we drop the 'Drop' experiments
from the experiments dataframe
already?
okControls (bool): Are we defining controls by
the method where it's written
into the Experiments file?
Returns:
list<exps_df, all_df, genes_df,
strainsUsed_list, genesUsed_list, genesUsed12_list>
exps_df (pandas DataFrame): Must contain cols: (Variable)
all_df (pandas DataFrame): Must contain cols:
genes_df (pandas DataFrame): Must contain cols:
scaffold, begin
strainsUsed_list (py list or None):
genesUsed_list (py list or None):
genesUsed12_list (py list or None):
Description:
Within data_prep1 we perform the following functions:
getDataFrames:
We import the tables genes, all, exps, rules using a dict to say which
data type is in each column. The dataframes we get are called:
genes_df, all_df, exps_df, rules_df
Within exps_df:
We optionally remove the rows who have 'Drop' set to True (if drop_exps==True).
We strip (remove the spaces from) the values in 'Group',
'Condition_1', 'Condition_2'
We check that the right column names exist in each of the tables.
checkLocusIdEquality:
We check all the locusIds in all_df are also present in genes_df
If debugging we also print the number of unique locusIds in each.
check_exps_df_against_all_df:
We check that the index names in all.poolcount are equivalent to the
'SetName' + '.' + 'Index' in exps
prepare_set_names:
We replace the SetNames from their original version to a simplified standard one,
remove the period in between SetName and Index in all.poolcount columns,
and make the 'names' column in the experiments file and the all.poolcount columns
have the same values. For example, we move column name from Keio_ML9_set2.IT004 to
set2IT004, and rename the values in the Experiments file similarly.
get_special_lists:
We get the lists from the files in data_dir if they are there,
otherwise we return their values as empty lists. The lists we
look for are genesUsed, which should be a list of locusIds
from this genome that we are using, and ignore_list, which is a list
of experiment names to ignore (columns from all.poolcount).
If debug_bool is set to true we print out resultant exps, all, genes to 'tmp' dir
We return the following variables:
'exps_df' (The experiments dataframe)
'all_df' (The barcodes and locations dataframe)
'genes_df' (The total genes dataframe)
'genesUsed_list' (A python list of locusIds that we will use)
'ignore_list' (A python list of experiment names to ignore)
"""
genes_df, all_df, exps_df, rules_df = getDataFrames(data_dir, FEBA_dir,
drop_exps=cfg['drop_exps'],
okControls = cfg['okControls'],
dbg_lvl=0)
# Makes no changes to the variables
checkLocusIdEquality(all_df, genes_df, debug_bool=debug_bool)
# We check that SetNames and Indexes in experiments file match all.poolcount file
check_exps_df_against_all_df(exps_df, all_df, meta_ix)
# We make it so the names are cleaner and create 'names', 'num', 'short' in exps_df
exps_df, all_df, replace_col_d = prepare_set_names(exps_df, all_df, rules_df,
okControls=cfg['okControls'],
meta_ix=meta_ix,
debug_bool=debug_bool)
genesUsed_list, ignore_list = get_special_lists(data_dir, all_df,
replace_col_d, debug_bool=debug_bool)
if debug_bool:
exps_df.to_csv("tmp/py_test1_exps_fp.tsv", sep="\t")
all_df.to_csv("tmp/py_test1_all_fp.tsv", sep="\t")
genes_df.to_csv("tmp/py_test1_genes_fp.tsv", sep="\t")
return [exps_df, all_df, genes_df, genesUsed_list, ignore_list]
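# Hypothetical call (paths are placeholders; data_dir must contain all.poolcount,
# genes, exps and pool, and cfg needs at least the keys used above):
#
#   exps_df, all_df, genes_df, genesUsed_list, ignore_list = data_prep_1(
#           "path/to/data_dir", "path/to/FEBA_dir",
#           cfg={"drop_exps": False, "okControls": False})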
def getDataFrames(data_dir, FEBA_dir, drop_exps=False,
okControls=False, dbg_lvl=0):
"""
Args:
data_dir: (str) Path to directory which contains the
following files: 'all.poolcount', 'genes',
'exps' - all TSV files.
Optionally contains the following files:
strainusage.barcodes.json - json list
strainusage.genes.json - json list
strainusage.genes12.json - json list
All these files are changed depending on the input.
FEBA_dir: (str) Path to directory which contains the
following files: 'desc_short_rules'
drop_exps (bool): Should we drop all experiments that have Drop=True
already?
Returns:
genes_df (pandas DataFrame): Contains columns:
locusId, sysName, type, scaffoldId, begin, end, strand, name, desc, GC, nTA
all_df (pandas DataFrame): Contains columns:
barcode, rcbarcode, scaffold, strand, pos, locusId, f, setName1, ..., setNameN
        exps_df (pandas DataFrame): Must contain columns:
Index (str)
Date_pool_expt_started (str)
Description (str)
SetName (Str)
Group (str)
Drop (bool)
[Condition_1]
[Condition_2]
rules_df (pandas DataFrame): Contains columns:
V1 (str): Original string to replace
V2 (str): String to replace V1 by
Description:
We import the tables using a dict to say which data type is in each column.
In exps_df:
            We might remove the rows that have 'Drop' set to True (if drop_exps==True).
We remove the spaces from the values in 'Group', 'Condition_1', 'Condition_2'
We check that the right column names exist in each of the tables.
To Do:
Should we strip all of the column names when we import them?
"""
data_files = os.listdir(data_dir)
for x in ["all.poolcount", "genes", "exps", "pool"]:
if x not in data_files:
raise Exception("Input data_dir to RunFEBA must include files:\n"
"all.poolcount, genes, exps, and pool."
" Currently missing: " + x)
all_fp = os.path.join(data_dir, "all.poolcount")
genes_fp = os.path.join(data_dir, "genes")
exps_fp = os.path.join(data_dir, "exps")
short_rules_fp = os.path.join(FEBA_dir, "desc_short_rules.tsv")
# Checking access permissions
for x in [all_fp, genes_fp, exps_fp]:
if not os.access(x, os.R_OK):
raise Exception("To run, program requires read permission to file " + x)
    # Read tsv files into dataframes, making sure columns locusId and scaffoldId are read as strings
genes_dtypes = {
'locusId': str,
'sysName': str,
'type': int,
'scaffoldId': str,
'begin': int,
'end': int,
'strand': str,
'name': str,
'desc': str,
'GC': float,
'nTA': int
}
genes_df = pd.read_table(genes_fp, dtype=genes_dtypes)
#barcode rcbarcode scaffold strand pos locusId f
all_dtypes = {
'barcode': str,
'rcbarcode': str,
'scaffold': str,
'strand': str,
'pos': int,
'locusId': str,
'f': float
}
all_df = pd.read_table(all_fp, dtype=all_dtypes)
exps_dtypes = {
'SetName': str,
'Index': str,
'Date_pool_expt_started': str,
"Description": str,
"Group": str,
"Drop": str,
"Condition_1": str,
"Condition_2": str,
"control_group": str,
"control_bool": str
}
exps_df = pd.read_table(exps_fp, dtype=exps_dtypes)
# We update the 'Drop' experiments
if 'Drop' in exps_df:
new_drops = []
for ix, value in exps_df['Drop'].items():
if not isinstance(value, str):
if pd.isna(value):
new_drops.append(False)
else:
raise Exception(f"Value in 'Drop' not string: {value}")
elif str(value).strip().upper() == "TRUE":
new_drops.append(True)
elif value.strip().upper() == "FALSE":
new_drops.append(False)
else:
raise Exception(f"Cannot recognize Drop value in row {ix}:"
f" {value}")
exps_df['Drop'] = new_drops
else:
exps_df['Drop'] = [False]*exps_df.shape[0]
"""
if drop_exps:
# Removing Drop rows
exps_df.drop(remove_indeces, axis=0, inplace=True)
"""
    # Remove leading/trailing spaces:
for x in ["Group", "Condition_1", "Condition_2", "control_bool"]:
if x in exps_df:
# We take the entire column (pandas Series) and remove the spaces
# from either end
exps_df[x] = exps_df[x].str.strip()
rules_dtypes = {
"V1": str,
"V2": str
}
rules_df = pd.read_table(short_rules_fp, keep_default_na=False, dtype=rules_dtypes)
# Checking genes.GC
for x in ["scaffoldId", "locusId", "sysName", "desc", "begin", "end"]:
if x not in genes_df.columns:
raise Exception(f"Genes table must include header {x}")
# Checking exps table
for x in ["SetName", "Index", "Date_pool_expt_started", "Description"]:
if x not in exps_df.columns:
raise Exception(f"Experiments table must include header {x}")
if okControls:
for x in ["control_group", "control_bool"]:
if x not in exps_df.columns:
raise Exception("If okControls is set To True, then "
f"experiments table must include header {x}")
# Checking all_df
for x in ["scaffold", "locusId", "f", "pos"]:
if x not in all_df.columns:
raise Exception(f"All.PoolCount file must include header {x}")
if dbg_lvl > 1:
print(genes_df)
print(all_df)
print(exps_df)
print(rules_df)
return [genes_df, all_df, exps_df, rules_df]
def checkLocusIdEquality(all_df, genes_df, debug_bool=False):
""" We check all the locusIds in all_df are also present in genes_df
Description:
We check all the locusIds in all_df are also present in genes_df
If debugging we also print the number of unique locusIds
"""
if debug_bool:
logging.debug("Original locusId col")
logging.debug(all_df['locusId'])
# below both are pandas series
unique_all_locusIds = all_df['locusId'].dropna().unique()
unique_genes_locusIds = genes_df['locusId'].dropna().unique()
if debug_bool:
# All
logging.debug("Unique All Locus Ids: ")
logging.debug(unique_all_locusIds)
logging.debug("Number of Unique All Locus Ids: ")
logging.debug(len(unique_all_locusIds))
# Genes
logging.debug("Unique Gene Locus Ids: ")
logging.debug(unique_genes_locusIds)
logging.debug("Number of Unique Gene Locus Ids: ")
logging.debug(len(unique_genes_locusIds))
# Checking if every locusId from all.poolcount also exists in genes
not_found_locusIds = []
for x in unique_all_locusIds:
if x not in unique_genes_locusIds:
not_found_locusIds.append(x)
if len(not_found_locusIds) > 0:
raise Exception("The following locusIds were not found in the genes file."
" (All locusIds from all.poolcount must also be in the genes"
" file.)"
"', '".join(not_found_locusIds))
def check_exps_df_against_all_df(exps_df, all_df, meta_ix):
"""
We make sure that all the experiment names left in the all_df dataframe
are the same as the experiment names in the rows of the experiments
dataframe.
"""
experiment_names_test = [exps_df['SetName'].iat[i] + "." + exps_df['Index'].iat[i] for i in \
range(len(exps_df['SetName']))]
index_names = list(all_df.head())[meta_ix:]
# Number of rows:
if len(index_names) != exps_df.shape[0]:
raise Exception(f"Number of data columns in {all_fp} does not match"
f" number of rows in {exps_fp}\n"
f"{len(index_names)} != {exps_df.shape[0]}")
for i in range(len(index_names)):
if index_names[i] not in experiment_names_test:
raise Exception(f"Column names in {all_fp} do not match names from"
f"{exps_fp} at index {i}")
logging.debug("There are the same experiment names in all_df and exps_df.")
def prepare_set_names(exps_df, all_df, rules_df,
okControls=False, meta_ix=7, debug_bool=False):
"""
Description:
We replace the SetNames from the complicated version to a simpler one,
remove the period in between SetName and Index in all.poolcount columns,
and make the 'names' column in the experiments file and the all.poolcount columns
have the same values. For example, we move column name from Keio_ML9_set2.IT004 to
set2IT004, and rename the values in the Experiments file similarly.
We also add multiple new columns to exps_df:
"num", "short", "name", "t0set"
We also make sure that any experiment with its "Group" being "Time0" has
its short as "Time0" as well.
We initialize the 't0set' column as being the date + the set name (lane).
"""
# Below is a numpy array, not a series
uniqueSetNames_nparray = exps_df['SetName'].unique()
# shortSetNames is numpy ndarray, shortNamesTranslation_d is a dict which contains
# conversions from original names to short names.
shortSetNames, shortNamesTranslation_d = ShortSetNames(uniqueSetNames_nparray)
if debug_bool:
logging.debug("uniqueSetNames:")
logging.debug(uniqueSetNames_nparray)
logging.debug("shortSetNames")
logging.debug(shortSetNames)
logging.debug("Above 2 arrays should be the same length.")
# We concatenate the string of the set name and the index column
# But first we need to find the original location of the set name
    # match_list is a list of indices (int) for each element in the first list
# where it is found in the second list.
match_list = match_ix(list(exps_df['SetName']), list(uniqueSetNames_nparray))
# We apply the match list to shortSetNames_list to recreate the original SetName order
# just with the newly created 'short' setNames.
short_names_srs = shortSetNames[match_list]
if debug_bool:
logging.info("short_names_srs: (shortSetNames[match_list])")
logging.info(short_names_srs)
logging.info("original set Names:")
logging.info(exps_df['SetName'])
logging.info('match_list')
logging.info(match_list)
# If there are 3 unique set names and 100 items in exps_df['SetName'],
# then match_list will contain 100 items with only 3 different values (0, 1, 2)
# expNamesNew ends up being a list<str>
expNamesNew = []
for i in range(len(short_names_srs)):
if not short_names_srs[i] in [None, np.nan]:
expNamesNew.append(short_names_srs[i] + exps_df['Index'][i])
else:
expNamesNew.append(exps_df['Index'][i])
if debug_bool:
logging.info('expNamesNew:')
logging.info(expNamesNew)
exps_df['num'] = range(1, exps_df.shape[0] + 1)
# We replace certain strings with others using the 'rules' table.
exps_df['short'] = applyRules(rules_df, list(exps_df['Description']))
if okControls:
if not "control_bool" in exps_df.columns:
raise Exception("Using manual control label but no column "
"'control_bool' in Experiments file!")
else:
for ix, val in exps_df["control_bool"].iteritems():
if val.strip().upper() == "TRUE":
exps_df["short"].loc[ix] = "Time0"
else:
# Should not be a Time0 short
if exps_df["short"].loc[ix].upper() == "TIME0":
raise Exception("Description of experiment indicates Time0, but"
f" value in control_bool is not 'True', instead '{val}'.")
if debug_bool:
logging.info("exps_df of col 'short':")
logging.info(exps_df['short'])
# We remove the "." in the names of the values. Just SetNameIndex now
replace_col_d = {list(all_df.head())[meta_ix + i]: expNamesNew[i] for i in range(len(expNamesNew))}
if debug_bool:
logging.info('replace_col_d')
logging.info(replace_col_d)
logging.info('original all_df col names:')
logging.info(list(all_df.columns))
all_df = all_df.rename(columns=replace_col_d)
if debug_bool:
logging.info('after replacement all_df col names:')
logging.info(list(all_df.columns))
exps_df['name'] = expNamesNew
# updating short to include Groups with Time0
num_time_zero = 0
for ix, val in exps_df['Group'].items():
if val.strip().upper() == "TIME0":
num_time_zero += 1
exps_df.loc[ix, 'short'] = "Time0"
    # Updating column 't0set' which refers to the date and SetName
exps_df['t0set'] = [exps_df['Date_pool_expt_started'].iat[ix] + " " + \
val for ix, val in exps_df['SetName'].items()]
if okControls:
if not "control_group" in exps_df.columns:
raise Exception("Using manual control label but no column "
"'control_group' in Experiments file!")
else:
for ix, val in exps_df["control_group"].iteritems():
exps_df['t0set'].loc[ix] = val
if debug_bool:
logging.info('exps_df short: ')
logging.info(exps_df['short'])
logging.info('exps_df t0set: ')
logging.info(exps_df['t0set'])
logging.info(f"Total number of time zeros: {num_time_zero}")
return exps_df, all_df, replace_col_d
def ShortSetNames(set_names_nparray, dbg_lvl=0):
""" Using a table with rules, shorten the names of these sets
Args:
set_names_nparray (numpy.ndarray): Array of string, unique set names from exps file
Returns:
set_names_nparray (numpy.ndarray): Edited set Names to be
in the format setX* or testX*
This might convert
[ Keio_ML9_set2, Keio_ML9_set2, Keio_ML9_set2, ..., Keio_ML9_set3, Keio_ML9_set3,..., Keio_ML9_set3]
to
[ set2, set2, set2, ..., set3, set3, ..., set3]
"""
set_names_nparray = np.copy(set_names_nparray)
    # Below returns a list of booleans indicating which
    # set names already match the simple pattern (i.e. already look like setX/testX)
simple = [bool(re.search(r"(set|test)[0-9A-Z]+[0-9A-Z0-9]*$", x)) for x in set_names_nparray]
if dbg_lvl > 0:
if len(simple) > 0:
logging.debug("simple names: \n" + ",".join(list([str(x) for x in simple])))
else:
logging.debug("No simple names found.")
# We edit the values of set_names_nparray who are true for simple
# by removing anything before 'set' or 'test'
# We count the number of values that were false
nleft = 0
simple_set_names = []
for i in range(len(simple)):
if simple[i]:
new_set_name = re.sub("^.*(set|test)", "\\1", set_names_nparray[i])
set_names_nparray[i] = new_set_name
simple_set_names.append(new_set_name)
else:
nleft += 1
if dbg_lvl > 0:
logging.debug("fixed set_names:\n" + ",".join(list(set_names_nparray)))
candidates = []
for x in "A.B.C.D.E.F.G.H.I.J.K.L.M.N.O.P.Q.R.S.T.U.V.W.X.Y.Z".split("."):
candidates.append("set" + x)
if dbg_lvl > 0:
logging.debug(candidates)
# get the elements in candidates that are not in set_names_nparray[simple]
candidates = [x for x in candidates if x not in simple_set_names]
if (nleft > len(candidates)):
raise Exception(f"Too many unexpected set names: {nleft}.\n To fix this, contact developer "
"and say to change the number of possible extensions in list candidates (A.B...Z).")
# Get the non-simple values from set_names_nparray
oldComplex = [x for x in set_names_nparray if x not in simple_set_names]
if dbg_lvl > 0:
logging.debug("oldComplex:\n" + ",".join(oldComplex))
cnd_ix = 0
translation_dict = {}
for i in range(len(simple)):
if not simple[i]:
logging.info(f"Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}")
translation_dict[set_names_nparray[i]] = candidates[cnd_ix]
set_names_nparray[i] = candidates[cnd_ix]
cnd_ix += 1
crnt_unq = list(pd.Series(set_names_nparray).unique())
repeats = []
for x in list(set_names_nparray):
if x in crnt_unq:
crnt_unq.remove(x)
else:
repeats.append(x)
if not (len(repeats) == 0):
raise Exception("Non-unique set names! :\n" + \
", ".join(repeats))
else:
logging.debug("Finished running short set names")
if dbg_lvl > 0:
logging.debug("Final set names list: " + ", ".join(set_names_nparray))
return set_names_nparray, translation_dict
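# Hedged illustration added for clarity (not part of the original pipeline):
# a tiny, self-contained sketch of what ShortSetNames does to a pair of
# unique set names. Names that already end in set<X>/test<X> are trimmed,
# anything else is mapped to the first free candidate (setA, setB, ...).
def _short_set_names_example():
    names = np.array(["Keio_ML9_set2", "SomeComplexRun"])
    short_names, translation = ShortSetNames(names)
    # short_names -> ['set2', 'setA'], translation -> {'SomeComplexRun': 'setA'}
    return short_names, translation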
def get_special_lists(data_dir, all_df, replace_col_d, debug_bool=False):
"""
Args:
replace_col_d: Dict mapping original all_df experiment name to replacement name
data_dir
Returns:
genesUsed_list list<str>: LocusIds of genes to use
ignore_list: List<str> New names for the experiments we want to ignore.
Description: We get the lists from the files in data_dir if they are there.
Otherwise we return their values as empty lists. The lists we
look for are genesUsed, which should be a list of locusIds
from this genome that we are using, and ignore_list, which is a list
of experiment names to ignore (columns from all.poolcount)
"""
genesUsed_list = []
ignore_list = []
# list of locusIds
genesUsed_fp = os.path.join(data_dir, "strainusage.genes.json")
# list of extra ignored experiments
ignore_list_fp = os.path.join(data_dir, "ignore_list.json")
if os.path.isfile(genesUsed_fp) and os.access(genesUsed_fp, os.R_OK):
        genesUsed_list = json.loads(open(genesUsed_fp).read())
logging.info(f"Loaded {len(genesUsed_list)} genes to include in the "
"analysis\n")
if os.path.isfile(ignore_list_fp) and os.access(ignore_list_fp, os.R_OK):
pre_ignore_list = json.loads(open(ignore_list_fp).read())
for x in pre_ignore_list:
if x in replace_col_d:
ignore_list.append(x)
else:
raise Exception(f"Avoid list contains experiment {x} but experiment name"
" not found in all.poolcount."
f" Possible names: {', '.join(list(replace_col_d.keys()))}")
ignore_list = [replace_col_d[x] for x in ignore_list]
return genesUsed_list, ignore_list
def applyRules(rules_df, desc_str_list):
"""
We replace str value in V1 with value in V2
Args:
rules_df: data frame with cols:
V1, V2
desc_str_list: list<str>
Returns:
new_desc_list: list<str>
"""
new_desc_list = []
for j in range(len(desc_str_list)):
new_desc_list.append(desc_str_list[j])
for i in range(0, rules_df.shape[0]):
new_desc_list[-1] = new_desc_list[-1].replace(rules_df["V1"].iloc[i],
rules_df["V2"].iloc[i])
return new_desc_list
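# Hedged illustration added for clarity (not part of the original module):
# applyRules performs plain substring replacement, applying every V1 -> V2
# rule in order to each description. pandas is assumed to be imported as pd
# at the top of this module.
def _apply_rules_example():
    rules = pd.DataFrame({"V1": ["hours", "minutes"], "V2": ["h", "min"]})
    descs = ["LB 48 hours", "glucose 30 minutes"]
    return applyRules(rules, descs)  # -> ['LB 48 h', 'glucose 30 min']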
| 2.703125 | 3 |
python/latex_plots.py | kjetil-lye/phd_thesis_standalone_plots | 0 | 12788476 | import matplotlib
matplotlib.rcParams['savefig.dpi'] = 600
# see https://stackoverflow.com/a/46262952 (for norm symbol)
# and https://stackoverflow.com/a/23856968
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = [
r'\usepackage{amsmath}',
r'\usepackage{amsfonts}',
r'\usepackage{amssymb}']
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
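# Hedged usage sketch (added for illustration, not in the original file):
# with the preamble above, labels can use amsmath/amssymb macros such as a
# norm symbol. This assumes a working LaTeX installation, since usetex=True.
if __name__ == '__main__':
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    ax.set_ylabel(r'$\lVert u \rVert_{L^2(\mathbb{R})}$')
    fig.savefig('latex_label_example.png')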
| 2.109375 | 2 |
core/utils/nagisa.py | darakudou/setlist_forecast | 0 | 12788477 | import nagisa
from core.models import Music
class Nagisa():
def __init__(self, idol):
self.musics = Music.objects.filter(artist=idol).values("id", "identifications_title")
self.overide_nagisa = nagisa.Tagger(
single_word_list=list(self.musics.values_list("identifications_title", flat=True))
)
def extract(self, text):
return self.overide_nagisa.extract(text, extract_postags=["名詞", "英単語"])
def hit_title_ids(self, text):
        # TODO: this is a bit crude, but it will do for now...
text = text.replace("moon light", "moonlight")
text = text.replace("in the Dark", "inthedark")
token = self.extract(text)
hit_title_ids = []
identification_titles = [m["identifications_title"] for m in self.musics]
for word in token.words:
if word in identification_titles:
hit_title_ids.append(Music.objects.get(identifications_title=word).id)
return hit_title_ids
| 2.484375 | 2 |
readability/text/syllables.py | rbamos/py-readability-metrics | 198 | 12788478 | import re
def count(word):
"""
Simple syllable counting
"""
word = word if type(word) is str else str(word)
word = word.lower()
if len(word) <= 3:
return 1
word = re.sub('(?:[^laeiouy]es|[^laeiouy]e)$', '', word) # removed ed|
word = re.sub('^y', '', word)
matches = re.findall('[aeiouy]{1,2}', word)
return len(matches)
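# Hedged usage sketch (added for illustration, not part of the original
# module): the counter is a regex heuristic, so counts are approximate.
if __name__ == '__main__':
    for word in ('cat', 'table', 'readability'):
        print(word, count(word))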
| 4.09375 | 4 |
janusbackup/worker/jobs/test_job.py | NikitosnikN/janus-backup | 0 | 12788479 | import asyncio
from schedule import Scheduler
from janusbackup.logger import logger
from janusbackup.worker.jobs import BaseJob
class TestJob(BaseJob):
is_active = False
@staticmethod
async def _job(*args, **kwargs):
logger.debug("Hello world for TestJob")
@classmethod
def set_schedule_job(cls, scheduler: Scheduler, loop: asyncio.BaseEventLoop, *args, **kwargs):
scheduler.every(5).seconds.do(cls.get_schedule_job(), loop=loop, *args, **kwargs)
| 2.34375 | 2 |
amftrack/pipeline/functions/post_processing/global_plate.py | Cocopyth/MscThesis | 1 | 12788480 | import networkx as nx
def num_hypha(exp,args):
return("num_hypha",len(exp.hyphaes))
def prop_lost_tracks_junction(exp,args):
lost = 0
tracked = 0
lapse = args[0]
# for node in exp.nodes:
# t0 = node.ts()[0]
# if node.degree(t0) >=3 and t0 + lapse < exp.ts:
# if node.is_in(t0+lapse):
# tracked+=len(node.ts())
# else:
# lost += len(node.ts())
for t in range(exp.ts-lapse):
for node in exp.nodes:
if node.is_in(t) and node.degree(t)>=3:
if node.is_in(t+lapse):
tracked+=1
else:
lost += 1
return(f'prop_lost_track_junction_lape{lapse}', lost/(lost+tracked))
def prop_lost_tracks_tips(exp,args):
lost = 0
tracked = 0
lapse = args[0]
# for node in exp.nodes:
# t0 = node.ts()[0]
# if node.degree(t0) ==1 and t0 + lapse < exp.ts:
# if node.is_in(t0+lapse):
# tracked+=len(node.ts())
# else:
# lost += len(node.ts())
for t in range(exp.ts-lapse):
for node in exp.nodes:
if node.is_in(t) and node.degree(t)==1:
if node.is_in(t+lapse):
tracked+=1
else:
lost += 1
return(f'prop_lost_track_tips_lape{lapse}', lost/(lost+tracked))
def prop_inconsistent_root(exp,args):
return('inconsist_root',len(exp.inconsistent_root)/len(exp.hyphaes))
def number_of_timepoints_withing_boundaries(exp,args):
return('num_timepoint_within', int(exp.reach_out))
def number_of_timepoints(exp,args):
    return('number_timepoints', int(exp.ts))
| 3.125 | 3 |
pysplit/member.py | polynomialchaos/pysplit | 0 | 12788481 | <reponame>polynomialchaos/pysplit
# MIT License
#
# Copyright (c) 2021 Florian
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .utils import BaseClass, now
class Member(BaseClass):
"""Member class derived from pysplit base class."""
    def __init__(self, group, name, stamp=None):
"""Member class initialization.
Keyword arguments:
group -- group object
name -- member name
stamp -- a datetime object, a serialized datetime object or a datetime string (default now())
"""
        if stamp is None:
            stamp = now()
        super().__init__(stamp=stamp)
self.group = group
self.name = name
self.purchases = []
self.transfers = []
self.receives = []
def __str__(self):
return '{:} ({:.2f}{:})'.format(self.name, self.balance, self.group.currency)
def _serialize(self):
"""Convert the object to a JSON conform dictionary and return it."""
return {
'name': self.name
}
def add_purchase(self, purchase):
"""Add a purchase reference to the member.
Keyword arguments:
purchase -- a purchase object reference
"""
self.purchases.append(purchase)
def add_receive(self, receive):
"""Add a receive reference to the member.
Keyword arguments:
receive -- a purchase of transfer object reference
"""
self.receives.append(receive)
def add_transfer(self, transfer):
"""Add a transfer reference to the member.
Keyword arguments:
transfer -- a transfer object reference
"""
self.transfers.append(transfer)
@property
def balance(self):
"""Calculate the member balance and return the value in groups currency."""
balance = sum([x.amount for x in self.purchases])
balance += sum([x.amount for x in self.transfers])
balance -= sum([x.get_member_amount(self.name)
for x in self.receives])
return balance
def remove_purchase(self, purchase):
"""Remove a purchase reference from the member.
Keyword arguments:
transfer -- a purchase object reference
"""
self.purchases.remove(purchase)
def remove_receive(self, receive):
"""Remove a receive reference from the member.
Keyword arguments:
transfer -- a purchase or transfer object reference
"""
self.receives.remove(receive)
def remove_transfer(self, transfer):
"""Remove a transfer reference from the member.
Keyword arguments:
transfer -- a transfer object reference
"""
self.transfers.remove(transfer)
| 2.171875 | 2 |
scrap.py | silverkip/space-app | 0 | 12788482 | import json
import requests
from bs4 import BeautifulSoup
def updatePlaces(key):
'''Puts the launch places (address and coords) in a json'''
link = "http://www.spaceflightinsider.com/launch-schedule/"
places = {}
for url in [link, link+"?past=1"]:
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
for tag in soup.select("table.launchcalendar"):
result = {}
details = tag.find(class_="launchdetails").find_all("tr")
for detail in details:
result[detail.th.string.lower()] = detail.td.get_text()
place = result['location'].split(' ')
result['location'] = ' '.join(place[:-1])
coordinates = places.get(result['location'], geocode(result['location'], key))
places[result['location']] = coordinates
with open('places.txt', 'w') as fout:
json.dump(places, fout)
return places
def getLaunches(past=False):
''' Returns a dict containing info about future launches '''
url = "http://www.spaceflightinsider.com/launch-schedule/"
if past:
url += "?past=1"
page = requests.get(url)
soup = BeautifulSoup(page.text, 'lxml')
launches = []
places = {}
with open('places.txt') as fin:
places = json.load(fin)
for tag in soup.select("table.launchcalendar"):
result = {}
details = tag.find(class_="launchdetails").find_all("tr")
for detail in details:
result[detail.th.string.lower()] = detail.td.get_text()
style = tag.find(class_='vehicle').div['style']
index = style.index("http")
result['image'] = style[index:-3]
result['mission'] = tag.find(colspan='2').get_text()
result['description'] = tag.find(class_='description').p.get_text()
place = result['location'].split(' ')
result['location'] = ' '.join(place[:-1])
result['pad'] = place[-1]
coordinates = places.get(result['location'], None)
if coordinates:
result['long'] = coordinates.get('lng', None)
result['lat'] = coordinates.get('lat', None)
launches.append(result)
return launches
def geocode(address, key):
''' converts address string to lat-long coordinates '''
address = address.replace(' ', '+')
url = f"https://maps.googleapis.com/maps/api/geocode/json?key={key}&address={address}"
response = requests.get(url).json()
if not response['results']:
        print(f"Geocoding returned no results for address: {address}")
return {}
coordinates = response['results'][0]['geometry']['location']
for k, v in coordinates.items():
coordinates[k] = round(v, 7)
return coordinates
if __name__ == '__main__':
from pprint import pprint
#print('Please enter your Google API key:')
#key = input()
#updatePlaces(key)
launches = getLaunches()
for l in launches:
pprint(l['mission'])
pprint(l['location'])
pprint(l['lat'])
print()
| 3.140625 | 3 |
LeetcodeAlgorithms/337. House Robber III/house-robber-iii.py | Fenghuapiao/PyLeetcode | 3 | 12788483 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(root):
if not root:
return 0,0
lpre, lppre = dfs(root.left)
rpre, rppre = dfs(root.right)
return max(root.val + lppre + rppre, lpre + rpre), lpre + rpre
return dfs(root)[0]
| 3.65625 | 4 |
glow/transforms/classes.py | arquolo/ort | 0 | 12788484 | <gh_stars>0
from __future__ import annotations
__all__ = [
'ChannelMix', 'ChannelShuffle', 'CutOut', 'BitFlipNoise', 'Elastic',
'LumaJitter', 'DegradeJpeg', 'DegradeQuality', 'FlipAxis', 'HsvShift',
'MaskDropout', 'MultiNoise', 'WarpAffine'
]
from dataclasses import InitVar, dataclass, field
from typing import Any
import cv2
import numpy as np
from scipy.stats import ortho_group
from . import functional as F
from .core import DualStageTransform, ImageTransform, MaskTransform
# ---------------------------------- mixins ----------------------------------
class _LutTransform(ImageTransform):
def get_lut(self,
rng: np.random.Generator) -> np.ndarray | list[np.ndarray]:
raise NotImplementedError
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.dtype == np.uint8
lut = self.get_lut(rng)
if isinstance(lut, np.ndarray):
return cv2.LUT(image.ravel(), lut).reshape(image.shape)
assert len(lut) == image.shape[2]
planes = map(cv2.LUT, cv2.split(image), lut)
return cv2.merge([*planes]).reshape(image.shape)
# ---------------------------------- noise ----------------------------------
class AddNoise(ImageTransform):
"""Add uniform[-strength ... +strength] to each item"""
def __init__(self, strength: float = 0.2) -> None:
self.strength = int(strength * 255)
def __repr__(self) -> str:
return f'{type(self).__name__}(strength={self.strength})'
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.dtype == np.uint8
res = rng.integers(
-self.strength, self.strength, size=image.shape, dtype='i2')
res += image
return res.clip(0, 255).astype('u1')
class MultiNoise(ImageTransform):
"""Multiply uniform[1 - strength ... 1 + strength] to each item"""
def __init__(self, strength: float = 0.5) -> None:
self.low = max(0, 1 - strength)
self.high = 1 + strength
def __repr__(self) -> str:
return f'{type(self).__name__}(low={self.low}, high={self.high})'
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.dtype == np.uint8
res = rng.random(image.shape, dtype='f4')
res *= self.high - self.low
res += self.low
res *= image # Multiply
return res.clip(0, 255).astype('u1')
@dataclass
class BitFlipNoise(ImageTransform):
bitdepth: int = 4
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.dtype.kind == 'u'
planes = 8 * image.dtype.itemsize
if self.bitdepth >= planes:
return image
high_flip = 1 << (planes - self.bitdepth)
bitmask = (1 << planes) - high_flip
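        # Keep only the top `bitdepth` bits of each value, then re-randomize
        # the discarded low bits, which emulates quantization/bit-flip noise.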
res = image & bitmask
res += rng.integers(high_flip, size=image.shape, dtype=image.dtype)
return res
# ----------------------------- color alteration -----------------------------
class ChannelShuffle(ImageTransform):
def __repr__(self) -> str:
return f'{type(self).__name__}()'
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.ndim == 3
return rng.permutation(image, axis=-1)
@dataclass
class ChannelMix(ImageTransform):
intensity: tuple[float, float] = (0.5, 1.5)
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.ndim == 3
assert image.dtype == np.uint8
image = image.astype('f4')
num_channels = image.shape[-1]
mat = ortho_group.rvs(num_channels, random_state=rng).astype('f4')
mat *= rng.uniform(*self.intensity)
lumat = np.full((num_channels, num_channels), 1 / num_channels)
image = image @ ((np.eye(num_channels) - lumat) @ mat + lumat)
return image.clip(0, 255).astype('u1') # type: ignore
@dataclass
class LumaJitter(_LutTransform):
brightness: tuple[float, float] = (-0.2, 0.2)
contrast: tuple[float, float] = (0.8, 1.2)
def get_lut(self, rng: np.random.Generator) -> np.ndarray:
lut = np.arange(256, dtype='f4')
lut += 256 * rng.uniform(*self.brightness)
lut = (lut - 128) * rng.uniform(*self.contrast) + 128
return lut.clip(0, 255).astype('u1')
@dataclass
class GammaJitter(_LutTransform):
"""Alters gamma from [1/(1+gamma) ... 1+gamma]"""
gamma: float = 0.2
def __post_init__(self):
assert self.gamma >= 0
def get_lut(self, rng: np.random.Generator) -> np.ndarray:
lut = np.linspace(0, 1, num=256, dtype='f4')
max_gamma = 1 + self.gamma
lut **= rng.uniform(1 / max_gamma, max_gamma)
lut *= 255
return lut.clip(0, 255).astype('u1') # type: ignore
@dataclass
class HsvShift(_LutTransform):
max_shift: int = 20
def get_lut(self, rng: np.random.Generator) -> list[np.ndarray]:
hue, sat, val = rng.uniform(-self.max_shift, self.max_shift, size=3)
ramp = np.arange(256, dtype='i2')
luts = (
(ramp + hue) % 180,
(ramp + sat).clip(0, 255),
(ramp + val).clip(0, 255),
)
return [lut.astype('u1') for lut in luts]
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
assert image.ndim == image.shape[-1] == 3
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image = super().image(image, rng)
return cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
# ------------------------------- compression -------------------------------
@dataclass
class DegradeJpeg(ImageTransform):
quality: tuple[int, int] = (0, 15)
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
quality = int(rng.integers(*self.quality))
_, buf = cv2.imencode('.jpg', image,
(cv2.IMWRITE_JPEG_QUALITY, quality))
return cv2.imdecode(buf, cv2.IMREAD_UNCHANGED).reshape(image.shape)
@dataclass
class DegradeQuality(ImageTransform):
scale: tuple[float, float] = (0.25, 0.5)
modes: tuple[str, ...] = ('NEAREST', 'LINEAR', 'INTER_CUBIC', 'AREA')
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
shape = image.shape
scale = rng.uniform(*self.scale)
# downscale
mode = getattr(cv2, f'INTER_{rng.choice(self.modes)}')
image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=mode)
# upscale
mode = getattr(cv2, f'INTER_{rng.choice(self.modes)}')
image = cv2.resize(image, shape[1::-1], interpolation=mode)
return image.reshape(shape)
# ----------------------------- mask alteration -----------------------------
@dataclass
class MaskDropout(MaskTransform):
"""
Drops redundant pixels for each class,
so that np.bincount(mask.ravel()) <= alpha * mask.size
"""
alpha: float
ignore_index: int = -1
def mask(self, mask: np.ndarray, rng: np.random.Generator) -> np.ndarray:
return F.mask_dropout(
mask, rng, alpha=self.alpha, ignore_index=self.ignore_index)
# --------------------------------- geometry ---------------------------------
@dataclass
class FlipAxis(DualStageTransform):
"""
Flips image/mask vertically/horizontally & rotate by 90 at random.
In non-isotropic mode (default) flips only horizontally
"""
isotropic: bool = False
def prepare(self, rng: np.random.Generator, /, **_) -> dict[str, Any]:
ud, lr, rot90 = rng.integers(2, size=3)
if not self.isotropic:
ud = rot90 = 0
return {'ud': ud, 'lr': lr, 'rot90': rot90}
def image(self, image: np.ndarray, **params) -> np.ndarray:
return F.flip(image, **params)
def mask(self, mask: np.ndarray, **params) -> np.ndarray:
return F.flip(mask, **params)
@dataclass
class WarpAffine(DualStageTransform):
angle: float = 180
skew: float = 0.5
scale: tuple[float, float] = (1.0, 1.0)
inter: InitVar[str] = 'LINEAR'
_inter: int = field(init=False)
def __post_init__(self, inter: str):
self._inter = getattr(cv2, f'INTER_{inter}')
def prepare(self, rng: np.random.Generator, /, **_) -> dict[str, Any]:
return {
'skew': rng.uniform(-self.skew, self.skew),
'angle': rng.uniform(-self.angle, self.angle),
'scale': rng.uniform(*self.scale),
}
def image(self, image: np.ndarray, **params) -> np.ndarray:
return F.affine(image, **params, inter=self._inter)
def mask(self, mask: np.ndarray, **params) -> np.ndarray:
return F.affine(mask, **params, inter=cv2.INTER_NEAREST)
@dataclass
class Elastic(DualStageTransform):
"""Elastic deformation of image
Parameters:
- scale - max shift for each pixel
- sigma - size of gaussian kernel
"""
scale: float = 1
sigma: float = 50
inter: InitVar[str] = 'LINEAR'
_inter: int = field(init=False)
def __post_init__(self, inter: str):
self._inter = getattr(cv2, f'INTER_{inter}')
def prepare(self, rng: np.random.Generator, /, image: np.ndarray,
**_) -> dict[str, Any]:
offsets = rng.random((2, *image.shape[:2]), dtype='f4')
offsets *= self.scale * 2
offsets -= self.scale
for dim, (off, size) in enumerate(zip(offsets, image.shape[:2])):
shape = np.where(np.arange(2) == dim, size, 1)
off += np.arange(size).reshape(shape)
cv2.GaussianBlur(off, (17, 17), self.sigma, dst=off)
return {'offsets': offsets[::-1]}
def _apply(self, image: np.ndarray, inter: int, **params) -> np.ndarray:
map_x, map_y = params['offsets']
return cv2.remap(
image, map_x, map_y, inter, borderMode=cv2.BORDER_REFLECT_101)
def image(self, image: np.ndarray, **params) -> np.ndarray:
return self._apply(image, self._inter, **params)
def mask(self, mask: np.ndarray, **params) -> np.ndarray:
return self._apply(mask, cv2.INTER_NEAREST, **params)
@dataclass
class CutOut(ImageTransform):
max_holes: int = 80
size: int = 8
fill_value: int = 0
def image(self, image: np.ndarray, rng: np.random.Generator) -> np.ndarray:
num_holes = rng.integers(self.max_holes)
if not num_holes:
return image
anchors = rng.integers(0, image.shape[:2], size=(num_holes, 2))
# [N, dims, (min, max)]
holes = anchors[:, :, None] + [-self.size // 2, self.size // 2]
holes = holes.clip(0, np.array(image.shape[:2])[:, None])
image = image.copy()
for (y0, y1), (x0, x1) in holes:
image[y0:y1, x0:x1] = self.fill_value
return image
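# Hedged usage sketch (added for illustration, not part of the library API):
# image-only transforms take a uint8 array plus an explicit numpy Generator,
# so a minimal pipeline can be composed by chaining .image() calls.
def _example_pipeline():
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(32, 32, 3), dtype=np.uint8)
    img = LumaJitter().image(img, rng)
    img = MultiNoise(0.3).image(img, rng)
    return img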
| 2.046875 | 2 |
recc/emulators/python/linux-emulator-example.py | oscourse-tsinghua/OS2018spring-projects-g02 | 249 | 12788485 | <gh_stars>100-1000
# Copyright 2016 <NAME> Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import select
import time
import sys, tty, termios
from op_cpu_package.python_l0_module import OpCPUData
from op_cpu_package.op_cpu_module import OpCPU
# A Linux interface to using the python implementation of the One Page CPU emulator
def main_loop():
loader_data = OpCPUData()
op_cpu = OpCPU(loader_data)
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(sys.stdin.fileno())
input_buffer = [] # Characters to be sent to emulator
while not op_cpu.is_halted():
in_chrs = select.select([sys.stdin], [], [], 0.0001)[0]
if not in_chrs:
for x in range(0, 10000):
r = op_cpu.vm_getc()
if 'chr' in r:
sys.stdout.write(chr(r['chr']))
if r['chr'] == 10:
sys.stdout.write('\r')
sys.stdout.flush()
if len(input_buffer):
inchr = input_buffer.pop()
if op_cpu.vm_putc(inchr): # Not able to input chr
input_buffer = [inchr] + input_buffer
op_cpu.step()
else:
dobreak = False
for file in in_chrs:
c = file.read(1)
input_buffer = input_buffer + [ord(c)]
if ord(c) == 3:
dobreak = True
if dobreak:
break
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
main_loop()
| 2.515625 | 3 |
microcosm_sqlite/tests/test_threading.py | globality-corp/microcosm-sqlite | 0 | 12788486 | """
Threading tests.
"""
from multiprocessing.pool import ThreadPool
from tempfile import NamedTemporaryFile
from microcosm.api import create_object_graph
from microcosm.loaders import load_from_dict
from hamcrest import assert_that, contains
from microcosm_sqlite.context import SessionContext
from microcosm_sqlite.stores import GetOrCreateSession
from microcosm_sqlite.tests.fixtures import Example, Person, PersonStore
def test_threading():
with NamedTemporaryFile() as tmp_file:
loader = load_from_dict(
sqlite=dict(
paths=dict(
example=tmp_file.name,
),
),
)
graph = create_object_graph("example", testing=True, loader=loader)
store = PersonStore()
Person.recreate_all(graph)
with SessionContext(graph, Example) as context:
gw = store.create(
Person(id=1, first="George", last="Washington"),
)
tj = store.create(
Person(id=2, first="Thomas", last="Jefferson"),
)
context.commit()
pool = ThreadPool(2)
store.get_session = GetOrCreateSession(graph)
people = pool.map(lambda index: store.search()[index], range(2))
assert_that(people, contains(gw, tj))
| 2.625 | 3 |
cloud_addresses.py | auspex-labs/cloud-ipaddresses | 3 | 12788487 | <reponame>auspex-labs/cloud-ipaddresses<gh_stars>1-10
import re
import json
from ipaddress import ip_network
import requests
AWS_SOURCE = "https://ip-ranges.amazonaws.com/ip-ranges.json"
AZURE_SOURCE = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519"
GPC_SOURCE = "https://www.gstatic.com/ipranges/cloud.json"
OCEAN_SOURCE = "http://digitalocean.com/geo/google.csv"
ORACLE_SOUCE = "https://docs.oracle.com/iaas/tools/public_ip_ranges.json"
IPV4_FILE = "cloud_networks_4.json"
IPV6_FILE = "cloud_networks_6.json"
def aws(url=AWS_SOURCE):
aws_ranges = json.loads(requests.get(url).content)
aws_ipv4prefixes = set()
for prefix in aws_ranges["prefixes"]:
aws_ipv4prefixes.add(ip_network(prefix["ip_prefix"]))
aws_ipv6prefixes = set()
for prefix in aws_ranges["ipv6_prefixes"]:
aws_ipv6prefixes.add((ip_network(prefix["ipv6_prefix"])))
return aws_ipv4prefixes, aws_ipv6prefixes
def azure(url=AZURE_SOURCE):
azure_address_page = requests.get(url)
azure_ranges = json.loads(requests.get(re.findall(r"https://download.*?\.json", azure_address_page.text)[0]).content)
az_ipv4prefixes = set()
az_ipv6prefixes = set()
for prefix in azure_ranges["values"]:
for network in prefix["properties"]["addressPrefixes"]:
net = ip_network(network)
if net.version == 4:
az_ipv4prefixes.add(net)
elif net.version == 6:
az_ipv6prefixes.add(net)
else:
continue
return az_ipv4prefixes, az_ipv6prefixes
def gpc(url=GPC_SOURCE):
gpc_ranges = json.loads(requests.get(url).content)
gpc_ipv4prefixes = set()
gpc_ipv6prefixes = set()
for prefix in gpc_ranges["prefixes"]:
if prefix.get("ipv4Prefix") is not None:
gpc_ipv4prefixes.add(ip_network(prefix.get("ipv4Prefix")))
if prefix.get("ipv6Prefix") is not None:
gpc_ipv6prefixes.add(ip_network(prefix.get("ipv6Prefix")))
    return gpc_ipv4prefixes, gpc_ipv6prefixes
def ocean(url=OCEAN_SOURCE):
ocean_ranges = requests.get(url).content
do_ipv4prefixes = set()
do_ipv6prefixes = set()
for prefix in ocean_ranges.splitlines():
net = ip_network(prefix.decode("utf-8").split(",")[0])
if net.version == 4:
do_ipv4prefixes.add(net)
elif net.version == 6:
do_ipv6prefixes.add(net)
else:
continue
return do_ipv4prefixes, do_ipv6prefixes
def oracle(url=ORACLE_SOUCE):
oracle_ranges = json.loads(requests.get(url).content)
orc_ipv4prefixes = set()
orc_ipv6prefixes = set()
for cidrs in oracle_ranges["regions"]: # TODO Needs better variable names
for cidr in cidrs["cidrs"]:
net = ip_network(cidr["cidr"])
if net.version == 4:
orc_ipv4prefixes.add(net)
elif net.version == 6:
orc_ipv6prefixes.add(net)
else:
continue
    return orc_ipv4prefixes, orc_ipv6prefixes
def merge_networks(prefixes):
cidr = list(prefixes)
cidr.sort()
cidr = [str(net) for net in cidr]
# Find and merge adjacent CIDRs.
networks = dict()
for net in cidr:
if int(net.split("/")[1]) not in networks.keys():
networks.update({(int(net.split("/")[1])): []})
networks[int(net.split("/")[1])].append(net)
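    # Repeatedly collapse sibling subnets: whenever every subnet of a common
    # supernet is present at a given prefix length, replace them with that
    # supernet one level up, and repeat until no further merge is possible.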
updates = True
while updates:
updates = False
for mask in sorted(networks.copy(), reverse=False):
for network in networks[mask].copy():
complete = True
for sub in ip_network(network).supernet().subnets():
if str(sub) not in networks[mask]:
complete = False
if complete:
updates = True
supernet = str(ip_network(network).supernet())
if int(supernet.split("/")[1]) in networks:
networks[int(supernet.split("/")[1])].append(supernet)
else:
networks.update({(int(supernet.split("/")[1])): [supernet]})
for sub in ip_network(network).supernet().subnets():
networks[mask].remove(str(sub))
# net_count = 0
# for net in networks:
# net_count += len(networks[net])
return networks
def write_networks(networks, network_file):
with open(network_file, "w") as open_file:
json.dump(networks, open_file, indent=4, sort_keys=True)
ipv4prefixes = set()
ipv6prefixes = set()
aws4, aws6 = aws()
azure4, azure6 = azure()
gpc4, gpc6 = gpc()
ocean4, ocean6 = ocean()
oracle4, oracle6 = oracle()
ipv4prefixes.update(aws4)
ipv4prefixes.update(azure4)
ipv4prefixes.update(gpc4)
ipv4prefixes.update(ocean4)
ipv4prefixes.update(oracle4)
ipv6prefixes.update(aws6)
ipv6prefixes.update(azure6)
ipv6prefixes.update(gpc6)
ipv6prefixes.update(ocean6)
ipv6prefixes.update(oracle6)
ipv4nets = merge_networks(ipv4prefixes)
ipv6nets = merge_networks(ipv6prefixes)
print(len(ipv4nets))
# write_networks(ipv4nets, IPV4_FILE)
# write_networks(ipv6nets, IPV6_FILE)
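# Hedged illustration added for clarity (not part of the original script):
# two adjacent /25 siblings collapse into their /24 supernet; the emptied
# 25-bit bucket remains in the result as an empty list.
def _merge_networks_example():
    nets = {ip_network("192.0.2.0/25"), ip_network("192.0.2.128/25")}
    return merge_networks(nets)  # -> {25: [], 24: ['192.0.2.0/24']}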
| 2.5625 | 3 |
paraproc.py | herrlich10/paraproc | 0 | 12788488 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, shlex, time
import subprocess, multiprocessing, ctypes
import numpy as np
__author__ = 'herrlich10 <<EMAIL>>'
__version__ = '0.1.3'
if sys.version_info[0] == 3:
string_types = (str,)
else:
string_types = (basestring,)
def cmd_for_exec(cmd, cmd_kws):
'''
Format cmd appropriately for execution according to whether shell=True.
Split the cmd string into a list, if not shell=True.
Join the cmd list into a string, if shell=True.
Do nothing to callable.
'''
if not callable(cmd):
if 'shell' in cmd_kws and cmd_kws['shell']: # cmd string is required
if not isinstance(cmd, string_types):
cmd = ' '.join(cmd)
else: # cmd list is required
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Split by space, preserving quoted substrings
return cmd
def cmd_for_disp(cmd):
'''
Format cmd for printing.
'''
if isinstance(cmd, list):
return ' '.join(cmd)
else:
return cmd
def format_duration(duration, format='standard'):
'''
Format duration (in seconds) in a more human friendly way.
'''
if format == 'short':
units = ['d', 'h', 'm', 's']
elif format == 'long':
units = [' days', ' hours', ' minutes', ' seconds']
else:
units = [' day', ' hr', ' min', ' sec']
values = [int(duration//86400), int(duration%86400//3600), int(duration%3600//60), duration%60]
for K in range(len(values)): # values[K] would be the first non-zero value
if values[K] > 0:
break
formatted = ((('%d' if k<len(values)-1 else '%.3f') % values[k]) + units[k] for k in range(len(values)) if k >= K)
return ' '.join(formatted)
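# Hedged illustration added for clarity (not part of the original module):
# the default 'standard' format drops leading zero-valued units and keeps
# seconds with millisecond precision.
def _format_duration_example():
    return format_duration(3725.5)  # -> '1 hr 2 min 5.500 sec'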
class PooledCaller(object):
'''
Execute multiple command line programs, as well as python callables,
asynchronously and parallelly across a pool of processes.
'''
def __init__(self, pool_size=None):
if pool_size is None:
self.pool_size = multiprocessing.cpu_count() * 3 // 4
else:
self.pool_size = pool_size
self.ps = []
self.cmd_queue = []
self._n_cmds = 0 # Accumulated counter for generating cmd idx
self._pid2idx = {}
self._return_codes = []
def check_call(self, cmd, *args, **kwargs):
'''
Asynchronous check_call (queued execution, return immediately).
See subprocess.Popen() for more information about the arguments.
Multiple commands can be separated with ";" and executed sequentially
within a single subprocess in linux/mac, only if shell=True.
Python callable can also be executed in parallel via multiprocessing.
Note that only the return code of the child process will be retrieved
later when calling wait(), not the actual return value of the callable.
So the result of the computation needs to be saved in a file.
Parameters
----------
cmd : list, str, or callable
Computation in command line programs is handled with subprocess.
Computation in python callable is handled with multiprocessing.
shell : bool
If provided, must be a keyword argument.
If shell is True, the command will be executed through the shell.
*args, **kwargs :
If cmd is a callable, *args and **kwargs are passed to the callable as its arguments.
If cmd is a list or str, **kwargs are passed to subprocess.Popen().
'''
cmd = cmd_for_exec(cmd, kwargs)
self.cmd_queue.append((self._n_cmds, cmd, args, kwargs))
self._n_cmds += 1
def dispatch(self):
# If there are free slot and more jobs
while len(self.ps) < self.pool_size and len(self.cmd_queue) > 0:
idx, cmd, args, kwargs = self.cmd_queue.pop(0)
print('>> job {0}: {1}'.format(idx, cmd_for_disp(cmd)))
if callable(cmd):
p = multiprocessing.Process(target=cmd, args=args, kwargs=kwargs)
p.start()
else:
p = subprocess.Popen(cmd, **kwargs)
self.ps.append(p)
self._pid2idx[p.pid] = idx
def wait(self):
'''
Wait for all jobs in the queue to finish.
Returns
-------
codes : list
The return code of the child process for each job.
'''
self._start_time = time.time()
while len(self.ps) > 0 or len(self.cmd_queue) > 0:
# Dispatch jobs if possible
self.dispatch()
# Poll workers' state
for p in self.ps:
if isinstance(p, subprocess.Popen) and p.poll() is not None: # If the process is terminated
self._return_codes.append((self._pid2idx[p.pid], p.returncode))
self.ps.remove(p)
elif isinstance(p, multiprocessing.Process) and not p.is_alive(): # If the process is terminated
self._return_codes.append((self._pid2idx[p.pid], p.exitcode))
self.ps.remove(p)
time.sleep(0.1)
codes = [code for idx, code in sorted(self._return_codes)]
duration = time.time() - self._start_time
print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
if np.any(codes):
            print('returncodes: {0}'.format(codes))
else:
print('all returncodes are 0.')
self._n_cmds = 0
self._pid2idx = {}
self._return_codes = []
return codes
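# Hedged usage sketch (added for illustration, not part of the original
# docs): queue two shell commands and one python callable, then wait for all
# of them; wait() returns the per-job return/exit codes in submission order.
def _pooled_caller_example():
    pc = PooledCaller(pool_size=2)
    pc.check_call('echo hello', shell=True)
    pc.check_call('echo world', shell=True)
    pc.check_call(print, 'from a python callable')
    return pc.wait()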
class ArrayWrapper(type):
'''
This is the metaclass for classes that wrap an np.ndarray and delegate
non-reimplemented operators (among other magic functions) to the wrapped array.
'''
def __init__(cls, name, bases, dct):
def make_descriptor(name):
return property(lambda self: getattr(self.arr, name))
type.__init__(cls, name, bases, dct)
ignore = 'class mro new init setattr getattr getattribute'
ignore = set('__{0}__'.format(name) for name in ignore.split())
for name in dir(np.ndarray):
if name.startswith('__'):
if name not in ignore and name not in dct:
setattr(cls, name, make_descriptor(name))
class SharedMemoryArray(object, metaclass=ArrayWrapper):
'''
This class can be used as a usual np.ndarray, but its data buffer
is allocated in shared memory (under Cached Files in memory monitor),
and can be passed across processes without any data copy/duplication,
even when write access happens (which is lock-synchronized).
The idea is to allocate memory using multiprocessing.Array, and
access it from current or another process via a numpy.ndarray view,
without actually copying the data.
So it is both convenient and efficient when used with multiprocessing.
This implementation also demonstrates the power of composition + metaclass,
as opposed to the canonical multiple inheritance.
'''
def __init__(self, dtype, shape, initializer=None, lock=True):
self.dtype = np.dtype(dtype)
self.shape = shape
if initializer is None:
# Preallocate memory using multiprocessing is the preferred usage
self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], int(np.prod(self.shape)), lock=lock)
else:
self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], initializer, lock=lock)
if not lock:
self.arr = np.frombuffer(self.shared_arr, dtype=self.dtype).reshape(self.shape)
else:
self.arr = np.frombuffer(self.shared_arr.get_obj(), dtype=self.dtype).reshape(self.shape)
@classmethod
def zeros(cls, shape, dtype=float, lock=True):
'''
Return a new array of given shape and dtype, filled with zeros.
This is the preferred usage, which avoids holding two copies of the
potentially very large data simultaneously in the memory.
'''
return cls(dtype, shape, lock=lock)
@classmethod
def from_array(cls, arr, lock=True):
'''
Initialize a new shared-memory array with an existing array.
'''
# return cls(arr.dtype, arr.shape, arr.ravel(), lock=lock) # Slow and memory inefficient, why?
a = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
a[:] = arr # This is a more efficient way of initialization
return a
def __getattr__(self, attr):
if attr in ['acquire', 'release']:
return getattr(self.shared_arr, attr)
else:
return getattr(self.arr, attr)
def __dir__(self):
return list(self.__dict__.keys()) + ['acquire', 'release'] + dir(self.arr)
# At present, only numerical dtypes are supported.
dtype2ctypes = {
bool: ctypes.c_bool,
int: ctypes.c_long,
float: ctypes.c_double,
np.dtype('bool'): ctypes.c_bool,
np.dtype('int64'): ctypes.c_long,
np.dtype('int32'): ctypes.c_int,
np.dtype('int16'): ctypes.c_short,
np.dtype('int8'): ctypes.c_byte,
np.dtype('uint64'): ctypes.c_ulong,
np.dtype('uint32'): ctypes.c_uint,
np.dtype('uint16'): ctypes.c_ushort,
np.dtype('uint8'): ctypes.c_ubyte,
np.dtype('float64'): ctypes.c_double,
np.dtype('float32'): ctypes.c_float,
    }
| 1.742188 | 2 |
hearthbreaker/cards/minions/warlock.py | zetsubo/hearthstone-simulator | 0 | 12788489 | <filename>hearthbreaker/cards/minions/warlock.py
from hearthbreaker.cards.base import MinionCard, WeaponCard
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.game_objects import Weapon, Minion
from hearthbreaker.tags.action import Summon, Kill, Damage, Discard, DestroyManaCrystal
from hearthbreaker.tags.base import Effect, Aura, Deathrattle, CardQuery, CARD_SOURCE, Battlecry
from hearthbreaker.tags.condition import IsType, MinionCountIs, Not
from hearthbreaker.tags.event import TurnEnded
from hearthbreaker.tags.selector import MinionSelector, MinionCardSelector, PlayerSelector, \
SelfSelector, BothPlayer, HeroSelector, CharacterSelector, RandomPicker
from hearthbreaker.tags.status import ChangeHealth, ManaChange
class FlameImp(MinionCard):
def __init__(self):
super().__init__("Flame Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(3), HeroSelector()))
def create_minion(self, player):
return Minion(3, 2)
class PitLord(MinionCard):
def __init__(self):
super().__init__("Pit Lord", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC, MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(5), HeroSelector()))
def create_minion(self, player):
return Minion(5, 6)
class Voidwalker(MinionCard):
def __init__(self):
super().__init__("Voidwalker", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE, MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(1, 3, taunt=True)
class DreadInfernal(MinionCard):
def __init__(self):
super().__init__("Dread Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, MINION_TYPE.DEMON,
battlecry=Battlecry(Damage(1), CharacterSelector(players=BothPlayer())))
def create_minion(self, player):
return Minion(6, 6)
class Felguard(MinionCard):
def __init__(self):
super().__init__("Felguard", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, MINION_TYPE.DEMON,
battlecry=Battlecry(DestroyManaCrystal(), PlayerSelector()))
def create_minion(self, player):
return Minion(3, 5, taunt=True)
class Doomguard(MinionCard):
def __init__(self):
super().__init__("Doomguard", 5, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, MINION_TYPE.DEMON,
battlecry=Battlecry(Discard(amount=2), PlayerSelector()))
def create_minion(self, player):
return Minion(5, 7, charge=True)
class Succubus(MinionCard):
def __init__(self):
super().__init__("Succubus", 2, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE, MINION_TYPE.DEMON,
battlecry=Battlecry(Discard(), PlayerSelector()))
def create_minion(self, player):
return Minion(4, 3)
class SummoningPortal(MinionCard):
def __init__(self):
super().__init__("Summoning Portal", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(0, 4, auras=[Aura(ManaChange(2, 1, MinionCardSelector()), PlayerSelector())])
class BloodImp(MinionCard):
def __init__(self):
super().__init__("Blood Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(0, 1, stealth=True,
effects=[Effect(TurnEnded(), ChangeHealth(1), MinionSelector(picker=RandomPicker()))])
class LordJaraxxus(MinionCard):
def __init__(self):
super().__init__("Lord Jaraxxus", 9, CHARACTER_CLASS.WARLOCK, CARD_RARITY.LEGENDARY, MINION_TYPE.DEMON)
def create_minion(self, player):
def summon_jaraxxus(minion):
from hearthbreaker.powers import JaraxxusPower
class BloodFury(WeaponCard):
def __init__(self):
super().__init__("Blood Fury", 3, CHARACTER_CLASS.LORD_JARAXXUS, CARD_RARITY.SPECIAL)
def create_weapon(self, player):
return Weapon(3, 8)
minion.remove_from_board()
player.trigger("minion_played", minion)
player.hero.health = minion.health
player.hero.base_health = minion.base_health + minion.health_delta
player.hero.character_class = CHARACTER_CLASS.LORD_JARAXXUS
player.hero.power = JaraxxusPower(player.hero)
blood_fury = BloodFury()
weapon = blood_fury.create_weapon(player)
weapon.card = blood_fury
weapon.player = player
weapon.game = player.game
weapon.equip(player)
return Minion(3, 15, battlecry=summon_jaraxxus)
class Infernal(MinionCard):
def __init__(self):
super().__init__("Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.SPECIAL,
minion_type=MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(6, 6)
class VoidTerror(MinionCard):
def __init__(self):
super().__init__("Void Terror", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, MINION_TYPE.DEMON)
def create_minion(self, player):
def consume_adjacent(m):
bonus_attack = 0
bonus_health = 0
if m.index > 0:
minion = m.player.minions[m.index - 1]
bonus_attack += minion.calculate_attack()
bonus_health += minion.health
minion.die(None)
if m.index < len(m.player.minions) - 1:
minion = m.player.minions[m.index + 1]
bonus_attack += minion.calculate_attack()
bonus_health += minion.health
minion.die(None)
m.change_attack(bonus_attack)
m.increase_health(bonus_health)
return Minion(3, 3, battlecry=consume_adjacent)
class Voidcaller(MinionCard):
def __init__(self):
super().__init__("Voidcaller", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, MINION_TYPE.DEMON)
def create_minion(self, player):
return Minion(3, 4, deathrattle=Deathrattle(Summon(CardQuery(conditions=[IsType(MINION_TYPE.DEMON)],
source=CARD_SOURCE.MY_HAND)), PlayerSelector()))
class AnimaGolem(MinionCard):
def __init__(self):
super().__init__("Anima Golem", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC, MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(9, 9, effects=[Effect(TurnEnded(MinionCountIs(1), BothPlayer()), Kill(), SelfSelector())])
class Imp(MinionCard):
def __init__(self):
super().__init__("Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.SPECIAL, MINION_TYPE.DEMON,
ref_name="Imp (Warlock)")
def create_minion(self, player):
return Minion(1, 1)
class WorthlessImp(MinionCard):
def __init__(self):
super().__init__("Worthless Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.SPECIAL, MINION_TYPE.DEMON)
def create_minion(self, p):
return Minion(1, 1)
class FelCannon(MinionCard):
def __init__(self):
super().__init__("Fel Cannon", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE, MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(3, 5, effects=[Effect(TurnEnded(), Damage(2), MinionSelector(Not(IsType(MINION_TYPE.MECH, True)),
BothPlayer(), RandomPicker()))])
| 2.125 | 2 |
matplotlib/gallery_python/ticks_and_spines/tick-formatters.py | gottaegbert/penter | 13 | 12788490 | <reponame>gottaegbert/penter
"""
===============
Tick formatters
===============
Tick formatters define how the numeric value associated with a tick on an axis
is formatted as a string.
This example illustrates the usage and effect of the most common formatters.
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def setup(ax, title):
"""Set up common parameters for the Axes in the example."""
# only show the bottom spine
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['top'].set_color('none')
# define tick positions
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(which='major', width=1.00, length=5)
ax.tick_params(which='minor', width=0.75, length=2.5, labelsize=10)
ax.set_xlim(0, 5)
ax.set_ylim(0, 1)
ax.text(0.0, 0.2, title, transform=ax.transAxes,
fontsize=14, fontname='Monospace', color='tab:blue')
fig, axs = plt.subplots(7, 1, figsize=(8, 6))
# Null formatter
setup(axs[0], title="NullFormatter()")
axs[0].xaxis.set_major_formatter(ticker.NullFormatter())
# Fixed formatter
setup(axs[1], title="FixedFormatter(['A', 'B', 'C', ...])")
# FixedFormatter should only be used together with FixedLocator.
# Otherwise, one cannot be sure where the labels will end up.
positions = [0, 1, 2, 3, 4, 5]
labels = ['A', 'B', 'C', 'D', 'E', 'F']
axs[1].xaxis.set_major_locator(ticker.FixedLocator(positions))
axs[1].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# FuncFormatter can be used as a decorator
@ticker.FuncFormatter
def major_formatter(x, pos):
return "[%.2f]" % x
setup(axs[2], title='FuncFormatter(lambda x, pos: "[%.2f]" % x)')
axs[2].xaxis.set_major_formatter(major_formatter)
# FormatStr formatter
setup(axs[3], title="FormatStrFormatter('#%d')")
axs[3].xaxis.set_major_formatter(ticker.FormatStrFormatter("#%d"))
# Scalar formatter
setup(axs[4], title="ScalarFormatter()")
axs[4].xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
# StrMethod formatter
setup(axs[5], title="StrMethodFormatter('{x:.3f}')")
axs[5].xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.3f}"))
# Percent formatter
setup(axs[6], title="PercentFormatter(xmax=5)")
axs[6].xaxis.set_major_formatter(ticker.PercentFormatter(xmax=5))
plt.tight_layout()
plt.show()
| 3.53125 | 4 |
messenger/pc2/ReaderSet2.py | ABSanthosh/Python-messenger | 0 | 12788491 | <filename>messenger/pc2/ReaderSet2.py
import imaplib
import email
import email.parser
last_seen = None  # most recently printed message, used to skip consecutive duplicates
ans="y"
while ans=="y"or ans=="Y":
mail = imaplib.IMAP4_SSL('imap.gmail.com')
mail.login('mail2', 'mail2pass')
mail.list()
mail.select('inbox')
result, data = mail.uid('search', None, '(FROM "mail3")')
i = len(data[0].split())
for x in range(i):
latest_email_uid = data[0].split()[x]
result, email_data = mail.uid('fetch', latest_email_uid, '(RFC822)')
raw_email = email_data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
        payload = email_message.get_payload().strip()
        # print the message only when it differs from the one printed previously
        if payload != last_seen:
            print(payload)
            last_seen = payload
#ans=input("Reload?(y/n)")
if ans=="y"or ans=="Y":
continue
else:
print("Thank you for using Julius Coder!Come back again")
| 2.75 | 3 |
deeds/apps.py | kingsdigitallab/ec-django | 0 | 12788492 | from django.apps import AppConfig
class DeedsConfig(AppConfig):
name = 'deeds'
| 1.179688 | 1 |
LC/225.py | szhu3210/LeetCode_Solutions | 2 | 12788493 | class Stack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.l=[]
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.l.append(x)
for i in range(len(self.l)-1):
self.l.append(self.l.pop(0))
def pop(self):
"""
:rtype: nothing
"""
self.l.pop(0)
def top(self):
"""
:rtype: int
"""
return self.l[0]
def empty(self):
"""
:rtype: bool
"""
return not self.l | 4.0625 | 4 |
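# Illustrative usage sketch (not part of the original LeetCode submission): the
# list is rotated on every push, so the newest element always sits at index 0
# and all four operations read from the front. The names below are assumptions.
demo = Stack()
demo.push(1)
demo.push(2)
assert demo.top() == 2   # most recently pushed element is at the front
demo.pop()               # discards 2 (this variant returns nothing)
assert demo.top() == 1
assert not demo.empty()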
setup.py | kazuki/pyramid-oas3 | 2 | 12788494 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def _load_lines(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return [l.strip() for l in f.readlines()]
setup(
name='pyramid_oas3',
version='0.1.5',
description='OpenAPI 3.0 Validator for Pyramid',
packages=['pyramid_oas3', 'pyramid_oas3.jsonschema'],
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
url='http://github.com/kazuki/pyramid-oas3',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=_load_lines('requirements.txt'),
    tests_require=_load_lines('test-requirements.txt'),
test_suite='nose2.collector.collector',
)
| 1.53125 | 2 |
examples/trending_example.py | RiccardoTOTI/TikTok-Api-1 | 3 | 12788495 | <reponame>RiccardoTOTI/TikTok-Api-1<gh_stars>1-10
from TikTokApi import TikTokApi
verify_fp = "verify_xxx"
api = TikTokApi(custom_verify_fp=verify_fp)
for video in api.trending.videos():
print(video.id)
| 2.25 | 2 |
backend/crawler_news.py | nohsion/Neutral_Gear_News | 1 | 12788496 | import requests
import time
from bs4 import BeautifulSoup
from pymongo import MongoClient
from selenium import webdriver
client = MongoClient('localhost', 27017)
db = client.dbnews
def crawler_daum_news(date):
db_list = client.list_database_names()
if 'dbnews' in db_list:
        print('refreshing db with the latest news')
client.drop_database('dbnews')
headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}
url = 'https://news.daum.net/ranking/popular'
if url:
url += '?regDate=' + date
data = requests.get(url, headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
date = soup.select_one('.box_calendar > .screen_out').text
news_list = soup.select('.list_news2 > li')
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("user-agent=Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko")
driver = webdriver.Chrome(options=options)
for news in news_list:
rank = news.select_one('.rank_num.rank_popular > .wrap_num > .num_rank >.screen_out').text
detail_url = news.select_one('.cont_thumb > .tit_thumb > a')['href']
if news.select_one('a > img') is None:
img_url = ''
else:
img_url = news.select_one('a > img')['src']
title = news.select_one('.cont_thumb > .tit_thumb > a').text
info_news = news.select_one('.cont_thumb > .tit_thumb > .info_news').text
detail_content = news.select_one('.cont_thumb > .desc_thumb > .link_txt').text.strip()
driver.get(detail_url)
        time.sleep(0.5)  # adjust depending on the network environment...
soup2 = BeautifulSoup(driver.page_source, 'html.parser')
emoticon_list = soup2.select_one('.list-wrapper')
selects = emoticon_list.select('.count')
count_list = []
for i in range(len(selects)):
count_list.append(int(selects[i].text))
doc = {
'rank': rank,
'info_news': info_news,
'title': title,
'detail_content': detail_content,
'date': date,
'detail_url': detail_url,
'img_url': img_url,
'nr_RECOMMEND': count_list[0],
'nr_LIKE': count_list[1],
'nr_IMPRESS': count_list[2],
'nr_ANGRY': count_list[3],
'nr_SAD': count_list[4],
}
db.headline.insert_one(doc)
print(rank, info_news, title, detail_content, detail_url, img_url, count_list)
date = ''
crawler_daum_news(date)
| 2.8125 | 3 |
dashboard/libraries/constants.py | TetsuyaKataoka/DashBoardCovid19 | 1 | 12788497 | # CSVファイルのカラムとmodelのフィールドの対応付け情報
COLUMNS_PROVINCE_STATE_04 = 'Province_State'
COLUMNS_COUNTRY_REGION_04 = 'Country_Region'
# COLUMNS_REPORT_DATE_04 = 'Last_Update'
COLUMNS_LATITUDE_04 = 'Lat'
COLUMNS_LONGITUDE_04 = 'Long_'
COLUMNS_TOTAL_CASES_04 = 'Confirmed'
COLUMNS_TOTAL_DEATHS_04 = 'Deaths'
COLUMNS_TOTAL_RECOVERED_04 = 'Recovered'
COLUMNS_ACTIVE_CASES_04 = 'Active'
READ_COLUMNS_04 = [COLUMNS_PROVINCE_STATE_04,
COLUMNS_COUNTRY_REGION_04,
COLUMNS_LATITUDE_04,
COLUMNS_LONGITUDE_04,
COLUMNS_TOTAL_CASES_04,
COLUMNS_TOTAL_DEATHS_04,
COLUMNS_TOTAL_RECOVERED_04,
COLUMNS_ACTIVE_CASES_04
]
# Mapping of CSV file columns to model fields
COLUMNS_PROVINCE_STATE_03 = 'Province/State'
COLUMNS_COUNTRY_REGION_03 = 'Country/Region'
COLUMNS_LATITUDE_03 = 'Latitude'
COLUMNS_LONGITUDE_03 = 'Longitude'
COLUMNS_TOTAL_CASES_03 = 'Confirmed'
COLUMNS_TOTAL_DEATHS_03 = 'Deaths'
COLUMNS_TOTAL_RECOVERED_03 = 'Recovered'
READ_COLUMNS_03 = [COLUMNS_PROVINCE_STATE_03,
COLUMNS_COUNTRY_REGION_03,
COLUMNS_LATITUDE_03,
COLUMNS_LONGITUDE_03,
COLUMNS_TOTAL_CASES_03,
COLUMNS_TOTAL_DEATHS_03,
COLUMNS_TOTAL_RECOVERED_03
]
COLUMN_KEYS = [
'province_state',
'country_region',
'latitude',
'longitude',
'confirmed',
'deaths',
'recovered',
'active'
]
# Date format used for dates shown on the chart
DATE_FORMAT_CHART = '%m/%d'
# Date format used in the report csv file names
DATE_FORMAT_REPORT_CSV = '%m-%d-%Y'
# Directory path where the report csv files are stored
DIRECTORY_PATH_REPORT_CSV = 'static/csv/'
| 1.632813 | 2 |
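# Hedged usage sketch (not part of the constants module above): the parallel
# lists appear intended to be zipped into a CSV-column -> model-field rename
# map; the pandas DataFrame below is an illustrative assumption, not project code.
import pandas as pd
RENAME_MAP_04 = dict(zip(READ_COLUMNS_04, COLUMN_KEYS))  # 8 CSV columns -> 8 field keys
RENAME_MAP_03 = dict(zip(READ_COLUMNS_03, COLUMN_KEYS))  # zip stops at 7; the 03 layout has no "active" column
frame = pd.DataFrame(columns=READ_COLUMNS_04).rename(columns=RENAME_MAP_04)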
seahub/api2/endpoints/admin/org_stats.py | MJochim/seahub | 0 | 12788498 | <filename>seahub/api2/endpoints/admin/org_stats.py
# Copyright (c) 2012-2016 Seafile Ltd.
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.endpoints.admin.statistics import (
check_parameter, get_init_data, get_time_offset
)
from seahub.utils import get_org_traffic_by_day
from seahub.utils.timeutils import datetime_to_isoformat_timestr
class AdminOrgStatsTraffic(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsAdminUser,)
@check_parameter
def get(self, request, start_time, end_time, *args, **kwargs):
org_id = kwargs['org_id']
op_type_list = ['web-file-upload', 'web-file-download',
'sync-file-download', 'sync-file-upload',
'link-file-upload', 'link-file-download']
init_count = [0] * 6
init_data = get_init_data(start_time, end_time,
dict(list(zip(op_type_list, init_count))))
for e in get_org_traffic_by_day(org_id, start_time, end_time,
get_time_offset()):
dt, op_type, count = e
init_data[dt].update({op_type: count})
res_data = []
for k, v in list(init_data.items()):
res = {'datetime': datetime_to_isoformat_timestr(k)}
res.update(v)
res_data.append(res)
return Response(sorted(res_data, key=lambda x: x['datetime']))
| 2.0625 | 2 |
sites_microsoft_auth/urls.py | gskudder/django_sites_microsoft_auth | 0 | 12788499 | <filename>sites_microsoft_auth/urls.py
from .old_conf import config
app_name = "sites_microsoft_auth"
urlpatterns = []
if config.MICROSOFT_AUTH_LOGIN_ENABLED: # pragma: no branch
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^auth-callback/$",
views.AuthenticateCallbackView.as_view(),
name="auth-callback",
)
]
| 1.4375 | 1 |
postal_code_api/migrations/0002_auto_20161008_1707.py | xecgr/postal_code_api | 0 | 12788500 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-06 04:40
from __future__ import unicode_literals
from django.db import migrations
def create_locations(apps, schema_editor):
country_model = apps.get_model("postal_code_api", "Country")
region_model = apps.get_model("postal_code_api", "Region")
province_model = apps.get_model("postal_code_api", "Province")
country,_ = country_model.objects.get_or_create(
name = u'España',
country_code= 'ES',
)
#https://es.wikipedia.org/wiki/ISO_3166-2:ES
regions = {
'ES-AN' : u'Andalucía',
'ES-AR' : u'Aragón',
'ES-AS' : u'Asturias',
'ES-CN' : u'Canarias',
'ES-CB' : u'Cantabria',
'ES-CM' : u'Castilla La Mancha',
'ES-CL' : u'Castilla y León',
'ES-CT' : u'Catalunya',
'ES-EX' : u'Extremadura',
'ES-GA' : u'Galicia',
'ES-IB' : u'Illes Balears',
'ES-RI' : u'La Rioja',
'ES-MD' : u'Comunidad de Madrid',
'ES-MC' : u'Región de Murcia',
'ES-NC' : u'Navarra',
'ES-PV' : u'País Vasco',
'ES-VC' : u'Comunidad Valenciana',
}
provinces = {
'C' : [ u'A Coruña', 'ES-GA'],
'VI' : [ u'Álava', 'ES-PV'],
'AB' : [ u'Albacete', 'ES-CM'],
'A' : [ u'Alicante', 'ES-VC'],
'AL' : [ u'Almería', 'ES-AN'],
'O' : [ u'Asturias', 'ES-AS'],
'AV' : [ u'Ávila', 'ES-CL'],
'BA' : [ u'Badajoz', 'ES-EX'],
'IB' : [ u'Balears', 'ES-IB'],
'B' : [ u'Barcelona', 'ES-CT'],
'BI' : [ u'Vizcaya', 'ES-PV'],
'BU' : [ u'Burgos', 'ES-CL'],
'CC' : [ u'Cáceres', 'ES-EX'],
'CA' : [ u'Cádiz', 'ES-AN'],
'S' : [ u'Cantabria', 'ES-CB'],
'CS' : [ u'Castellón', 'ES-VC'],
'CR' : [ u'Ciudad Real', 'ES-CM'],
'CO' : [ u'Córdoba', 'ES-AN'],
'CU' : [ u'Cuenca', 'ES-CM'],
'SS' : [ u'Gipuzcoa', 'ES-PV'],
'GI' : [ u'Girona', 'ES-CT'],
'GR' : [ u'Granada', 'ES-AN'],
'GU' : [ u'Guadalajara', 'ES-CM'],
'H' : [ u'Huelva', 'ES-AN'],
'HU' : [ u'Huesca', 'ES-AR'],
'J' : [ u'Jaén', 'ES-AN'],
'LO' : [ u'La Rioja', 'ES-RI'],
'GC' : [ u'Las Palmas', 'ES-CN'],
'LE' : [ u'León', 'ES-CL'],
'L' : [ u'Lleida', 'ES-CT'],
'LU' : [ u'Lugo', 'ES-GA'],
'M' : [ u'Madrid', 'ES-MD'],
'MA' : [ u'Málaga', 'ES-AN'],
'MU' : [ u'Murcia', 'ES-MC'],
'NA' : [ u'Navarra', 'ES-NC'],
'OR' : [ u'Ourense', 'ES-GA'],
'P' : [ u'Palencia', 'ES-CL'],
'PO' : [ u'Pontevedra', 'ES-GA'],
'SA' : [ u'Salamanca', 'ES-CL'],
'TF' : [ u'Santa Cruz de Tenerife', 'ES-CN'],
'SG' : [ u'Segovia', 'ES-CL'],
'SE' : [ u'Sevilla', 'ES-AN'],
'SO' : [ u'Soria', 'ES-CL'],
'T' : [ u'Tarragona', 'ES-CT'],
'TE' : [ u'Teruel', 'ES-AR'],
'TO' : [ u'Toledo', 'ES-CM'],
'V' : [ u'Valencia', 'ES-VC'],
'VA' : [ u'Valladolid', 'ES-CL'],
'ZA' : [ u'Zamora', 'ES-CL'],
'Z' : [ u'Zaragoza', 'ES-AR'],
}
#https://es.wikipedia.org/wiki/Anexo:Provincias_de_Espa%C3%B1a_por_c%C3%B3digo_postal
official_code__zip_code = {
'VI' : '01',
'AB' : '02',
'A' : '03',
'AL' : '04',
'AV' : '05',
'BA' : '06',
'IB' : '07',
'B' : '08',
'BU' : '09',
'CC' : '10',
'CA' : '11',
'CS' : '12',
'CR' : '13',
'CO' : '14',
'C' : '15',
'CU' : '16',
'GI' : '17',
'GR' : '18',
'GU' : '19',
'SS' : '20',
'H' : '21',
'HU' : '22',
'J' : '23',
'LE' : '24',
'L' : '25',
'LO' : '26',
'LU' : '27',
'M' : '28',
'MA' : '29',
'MU' : '30',
'NA' : '31',
'OR' : '32',
'O' : '33',
'P' : '34',
'GC' : '35',
'PO' : '36',
'SA' : '37',
'TF' : '38',
'S' : '39',
'SG' : '40',
'SE' : '41',
'SO' : '42',
'T' : '43',
'TE' : '44',
'TO' : '45',
'V' : '46',
'VA' : '47',
'BI' : '48',
'ZA' : '49',
'Z' : '50',
}
region_code__region = {}
    for region_code, name in regions.items():
region,_ = region_model.objects.get_or_create(
country = country,
name = name,
region_code = region_code
)
region_code__region[region_code] = region
    for official_code, name__region_code in provinces.items():
name,region_code = name__region_code
region = region_code__region[region_code]
zip_code = official_code__zip_code[official_code]
province,_ = province_model.objects.get_or_create(
country = country,
region = region,
name = name,
official_code = official_code,
zip_code = zip_code
)
def delete_locations(apps, schema_editor):
country_model = apps.get_model("postal_code_api", "Country")
country_model.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('postal_code_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_locations,delete_locations),
]
| 1.742188 | 2 |
my_flask_web/Config.py | s123600g/FlaskDemoNotes | 0 | 12788501 | # -*- coding: utf-8 -*-
class BaseConfig():
    # Flask main directory settings
static_url_path = ''
static_folder = ''
template_folder = ''
class DevelopermentConfig(BaseConfig):
DEBUG = True
SECRET_KEY = "flask1A556j/33X *~X2l!fgN]HelloWorlD/,?RT"
# Environment mode parameter settings
config = {
'developermentConfig': DevelopermentConfig,
}
| 1.695313 | 2 |
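# Hedged usage sketch (not part of Config.py): how the `config` lookup table above
# is typically consumed when building the Flask app. The Flask() constructor call
# and the fallback folder names are illustrative assumptions.
from flask import Flask
cfg = config['developermentConfig']
app = Flask(__name__,
            static_url_path=cfg.static_url_path or None,
            static_folder=cfg.static_folder or 'static',
            template_folder=cfg.template_folder or 'templates')
app.config.from_object(cfg)  # picks up DEBUG and SECRET_KEY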
train/labeler.py | lvaughn/nnsplit | 248 | 12788502 | from typing import List
from fractions import Fraction
from abc import ABC, abstractmethod
import spacy
import string
import random
import pandas as pd
import numpy as np
import diskcache
import sys
from somajo import SoMaJo
from spacy.lang.tr import Turkish
from spacy.lang.sv import Swedish
from spacy.lang.uk import Ukrainian
NO_MODEL_LANGUAGE_LOOKUP = {
"turkish": Turkish,
"swedish": Swedish,
"ukrainian": Ukrainian,
}
def noise(text, insert_chance, delete_chance, repeat_chance):
assert insert_chance == delete_chance == repeat_chance
chances = np.random.random(len(text) * 3)
    if (chances >= insert_chance).all():  # fast path: no position receives any noise
return text
out = ""
for i, char in enumerate(text):
if chances[i * 3] >= delete_chance:
out += char
if chances[(i * 3) + 1] < repeat_chance:
out += char
if chances[(i * 3) + 2] < insert_chance:
out += random.choice(string.ascii_letters)
return out
def get_model(name):
try:
nlp = spacy.load(name, disable=["tagger", "parser", "ner"])
except OSError:
nlp = NO_MODEL_LANGUAGE_LOOKUP[name]()
return nlp
def has_space(text: str) -> bool:
return any(x.isspace() for x in text)
class Tokenizer(ABC):
def __init__(self):
self.training = True
def train(self, mode=True):
self.training = mode
def eval(self):
self.train(False)
@abstractmethod
def tokenize(self, text: str) -> List[str]:
pass
def remove_last_punct(text: str, punctuation) -> str:
for i in range(len(text))[::-1]:
if text[i] in punctuation:
return text[:i] + text[i + 1 :]
elif not text[i].isspace():
return text
return text
class SpacySentenceTokenizer(Tokenizer):
def __init__(
self,
model_name: str,
lower_start_prob: Fraction,
remove_end_punct_prob: Fraction,
punctuation: str,
):
super().__init__()
self.nlp = get_model(model_name)
self.nlp.add_pipe("sentencizer")
self.lower_start_prob = lower_start_prob
self.remove_end_punct_prob = remove_end_punct_prob
self.punctuation = punctuation
def tokenize(self, text: str) -> List[str]:
out_sentences = []
current_sentence = ""
end_sentence = False
for token in self.nlp(text):
text = token.text
whitespace = token.whitespace_
if token.is_sent_start:
end_sentence = True
if end_sentence and not text.isspace():
if self.training and random.random() < self.remove_end_punct_prob:
current_sentence = remove_last_punct(current_sentence, self.punctuation)
out_sentences.append(current_sentence)
current_sentence = ""
end_sentence = False
if (
self.training
and len(current_sentence) == 0
and random.random() < self.lower_start_prob
):
text = text.lower()
current_sentence += text + whitespace
out_sentences.append(current_sentence)
return [x for x in out_sentences if len(x) > 0]
class SpacyWordTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = get_model(model_name).tokenizer
def tokenize(self, text: str) -> List[str]:
out_tokens = []
current_token = ""
for token in self.tokenizer(text):
if not token.text.isspace():
out_tokens.append(current_token)
current_token = ""
current_token += token.text + token.whitespace_
out_tokens.append(current_token)
return [x for x in out_tokens if len(x) > 0]
class SoMaJoSentenceTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = SoMaJo(model_name)
def tokenize(self, text: str) -> List[str]:
out_sentences = []
sentences = list(self.tokenizer.tokenize_text([text]))
for i, sentence in enumerate(sentences):
text = ""
for token in sentence:
if "SpaceAfter=No" in token.extra_info:
whitespace = ""
else:
whitespace = " "
text += token.text + whitespace
if i == len(sentences) - 1:
text = text.rstrip()
out_sentences.append(text)
return out_sentences
class SoMaJoWordTokenizer(Tokenizer):
def __init__(self, model_name: str):
super().__init__()
self.tokenizer = SoMaJo(model_name, split_sentences=False)
def tokenize(self, text: str) -> List[str]:
out_tokens = []
tokens = next(self.tokenizer.tokenize_text([text]))
for i, token in enumerate(tokens):
if "SpaceAfter=No" in token.extra_info or i == len(tokens) - 1:
whitespace = ""
else:
whitespace = " "
# sometimes sample more spaces than one space so the model learns to deal with it
while random.random() < 0.05:
whitespace += " "
out_tokens.append(token.text + whitespace)
return [x for x in out_tokens if len(x) > 0]
class WhitespaceTokenizer(Tokenizer):
def tokenize(self, text: str) -> List[str]:
out = None
for i in range(len(text))[::-1]:
if not text[i].isspace():
out = [text[: i + 1], text[i + 1 :]]
break
if out is None:
out = [text, ""]
return out
class SECOSCompoundTokenizer(Tokenizer):
def __init__(self, secos_path: str):
super().__init__()
sys.path.append(secos_path)
import decompound_server
self.decompound = decompound_server.make_decompounder(
[
"decompound_server.py",
f"{secos_path}data/denews70M_trigram__candidates",
f"{secos_path}data/denews70M_trigram__WordCount",
"50",
"3",
"3",
"5",
"3",
"upper",
"0.01",
"2020",
]
)
self.disk_cache = diskcache.Index("secos_cache")
self.cache = {}
for key in self.disk_cache:
self.cache[key] = self.disk_cache[key]
def tokenize(self, text: str) -> List[str]:
if text.isspace():
return [text]
text_bytes = text.encode("utf-8")
compounds = self.cache.get(text_bytes)
if compounds is None:
assert not has_space(text), text
compounds = self.decompound(text)
if len(compounds) == 0:
compounds = text
compound_bytes = compounds.encode("utf-8")
self.disk_cache[text_bytes] = compound_bytes
self.cache[text_bytes] = compound_bytes
else:
compounds = compounds.decode("utf-8")
compounds = compounds.split()
compounds = [noise(x, 0.001, 0.001, 0.001) for x in compounds]
return compounds if len(compounds) > 0 else [noise(text, 0.001, 0.001, 0.001)]
class Labeler:
def __init__(self, tokenizers):
self.tokenizers = tokenizers
def _annotate(self, text: str, tok_index=0):
if tok_index >= len(self.tokenizers):
return [(text, set())]
out = []
for token in self.tokenizers[tok_index].tokenize(text):
out += self._annotate(token, tok_index=tok_index + 1)
out[-1][1].add(tok_index)
return out
def _to_dense_label(self, annotations):
input_bytes = []
label = []
all_zeros = [0] * len(self.tokenizers)
for (token, annotation) in annotations:
token_bytes = token.encode("utf-8")
input_bytes += token_bytes
label += [all_zeros.copy() for _ in range(len(token_bytes))]
if len(label) > 0:
for idx in annotation:
label[-1][idx] = 1
return input_bytes, label
def label(self, text):
return self._to_dense_label(self._annotate(text))
def visualize(self, text):
text, label = self.label(text)
data = []
for char, label_col in zip(text, label):
data.append([char, *label_col])
df = pd.DataFrame(
data, columns=["byte", *[x.__class__.__name__ for x in self.tokenizers]]
).T
df.columns = ["" for _ in range(len(df.columns))]
with pd.option_context(
"display.max_columns",
len(text),
):
print(df)
if __name__ == "__main__":
labeler = Labeler(
[
SpacySentenceTokenizer(
"de_core_news_sm", lower_start_prob=0.7, remove_end_punct_prob=0.7, punctuation=".?!"
),
SpacyWordTokenizer("de_core_news_sm"),
WhitespaceTokenizer(),
SECOSCompoundTokenizer("../../../Experiments/SECOS/"),
]
)
labeler.visualize("KNN (ANN).")
| 2.90625 | 3 |
tests/py/test_close.py | mccolgst/www.gittip.com | 0 | 12788503 | from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import date
from decimal import Decimal as D
import mock
import pytest
from gratipay.billing.payday import Payday
from gratipay.models.community import Community
from gratipay.models.participant import Participant
from gratipay.testing import Harness
class TestClosing(Harness):
# close
def test_close_closes(self):
alice = self.make_participant('alice', claimed_time='now')
alice.close()
assert Participant.from_username('alice').is_closed
def test_close_fails_if_still_a_balance(self):
alice = self.make_participant('alice', claimed_time='now', balance=D('10.00'))
with pytest.raises(alice.BalanceIsNotZero):
alice.close()
def test_close_fails_if_still_owns_a_team(self):
alice = self.make_participant('alice', claimed_time='now')
self.make_team(owner=alice)
with pytest.raises(alice.StillATeamOwner):
alice.close()
def test_close_page_is_usually_available(self):
self.make_participant('alice', claimed_time='now')
body = self.client.GET('/~alice/settings/close', auth_as='alice').body
assert 'Personal Information' in body
def test_close_page_is_not_available_during_payday(self):
Payday.start()
self.make_participant('alice', claimed_time='now')
body = self.client.GET('/~alice/settings/close', auth_as='alice').body
assert 'Personal Information' not in body
assert 'Try Again Later' in body
def test_can_post_to_close_page(self):
self.make_participant('alice', claimed_time='now')
response = self.client.PxST('/~alice/settings/close', auth_as='alice')
assert response.code == 302
assert response.headers['Location'] == '/~alice/'
assert Participant.from_username('alice').is_closed
def test_cant_post_to_close_page_during_payday(self):
Payday.start()
self.make_participant('alice', claimed_time='now')
body = self.client.POST('/~alice/settings/close', auth_as='alice').body
assert 'Try Again Later' in body
def test_close_page_shows_a_message_to_team_owners(self):
alice = self.make_participant('alice', claimed_time='now')
self.make_team('A', alice)
body = self.client.GET('/~alice/settings/close', auth_as='alice').body
assert 'You are the owner of the A team.' in body
def test_close_page_shows_a_message_to_owners_of_two_teams(self):
alice = self.make_participant('alice', claimed_time='now')
self.make_team('A', alice)
self.make_team('B', alice)
body = self.client.GET('/~alice/settings/close', auth_as='alice').body
assert 'You are the owner of the A and B teams.' in body
def test_close_page_shows_a_message_to_owners_of_three_teams(self):
alice = self.make_participant('alice', claimed_time='now')
self.make_team('A', alice)
self.make_team('B', alice)
self.make_team('C', alice)
body = self.client.GET('/~alice/settings/close', auth_as='alice').body
assert 'You are the owner of the A, B and C teams.' in body
# cs - clear_subscriptions
def test_cs_clears_subscriptions(self):
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
alice.set_subscription_to(self.make_team(), D('1.00'))
nsubscriptions = lambda: self.db.one("SELECT count(*) FROM current_subscriptions "
"WHERE subscriber='alice' AND amount > 0")
assert nsubscriptions() == 1
with self.db.get_cursor() as cursor:
alice.clear_subscriptions(cursor)
assert nsubscriptions() == 0
def test_cs_doesnt_duplicate_zero_subscriptions(self):
alice = self.make_participant('alice', claimed_time='now')
A = self.make_team()
alice.set_subscription_to(A, D('1.00'))
alice.set_subscription_to(A, D('0.00'))
nsubscriptions = lambda: self.db.one("SELECT count(*) FROM subscriptions "
"WHERE subscriber='alice'")
assert nsubscriptions() == 2
with self.db.get_cursor() as cursor:
alice.clear_subscriptions(cursor)
assert nsubscriptions() == 2
def test_cs_doesnt_zero_when_theres_no_subscription(self):
alice = self.make_participant('alice')
nsubscriptions = lambda: self.db.one("SELECT count(*) FROM subscriptions "
"WHERE subscriber='alice'")
assert nsubscriptions() == 0
with self.db.get_cursor() as cursor:
alice.clear_subscriptions(cursor)
assert nsubscriptions() == 0
def test_cs_clears_multiple_subscriptions(self):
alice = self.make_participant('alice', claimed_time='now')
alice.set_subscription_to(self.make_team('A'), D('1.00'))
alice.set_subscription_to(self.make_team('B'), D('1.00'))
alice.set_subscription_to(self.make_team('C'), D('1.00'))
alice.set_subscription_to(self.make_team('D'), D('1.00'))
alice.set_subscription_to(self.make_team('E'), D('1.00'))
nsubscriptions = lambda: self.db.one("SELECT count(*) FROM current_subscriptions "
"WHERE subscriber='alice' AND amount > 0")
assert nsubscriptions() == 5
with self.db.get_cursor() as cursor:
alice.clear_subscriptions(cursor)
assert nsubscriptions() == 0
# cpi - clear_personal_information
@mock.patch.object(Participant, '_mailer')
def test_cpi_clears_personal_information(self, mailer):
alice = self.make_participant( 'alice'
, anonymous_giving=True
, anonymous_receiving=True
, avatar_url='img-url'
, email_address='<EMAIL>'
, claimed_time='now'
, session_token='<PASSWORD>'
, session_expires='2000-01-01'
, giving=20
, receiving=40
, npatrons=21
)
alice.upsert_statement('en', 'not forgetting to be awesome!')
alice.add_email('<EMAIL>')
with self.db.get_cursor() as cursor:
alice.clear_personal_information(cursor)
new_alice = Participant.from_username('alice')
assert alice.get_statement(['en']) == (None, None)
assert alice.anonymous_giving == new_alice.anonymous_giving == False
assert alice.anonymous_receiving == new_alice.anonymous_receiving == False
assert alice.number == new_alice.number == 'singular'
assert alice.avatar_url == new_alice.avatar_url == None
assert alice.email_address == new_alice.email_address == None
assert alice.claimed_time == new_alice.claimed_time == None
assert alice.giving == new_alice.giving == 0
assert alice.receiving == new_alice.receiving == 0
assert alice.npatrons == new_alice.npatrons == 0
assert alice.session_token == new_alice.session_token == None
assert alice.session_expires.year == new_alice.session_expires.year == date.today().year
assert not alice.get_emails()
team = self.make_participant('team', number='plural')
with self.db.get_cursor() as cursor:
team.clear_personal_information(cursor)
team2 = Participant.from_username('team')
assert team.number == team2.number == 'singular'
def test_cpi_clears_communities(self):
alice = self.make_participant('alice')
alice.insert_into_communities(True, 'test', 'test')
bob = self.make_participant('bob')
bob.insert_into_communities(True, 'test', 'test')
assert Community.from_slug('test').nmembers == 2 # sanity check
with self.db.get_cursor() as cursor:
alice.clear_personal_information(cursor)
assert Community.from_slug('test').nmembers == 1
# uic = update_is_closed
def test_uic_updates_is_closed(self):
alice = self.make_participant('alice')
alice.update_is_closed(True)
assert alice.is_closed
assert Participant.from_username('alice').is_closed
def test_uic_updates_is_closed_False(self):
alice = self.make_participant('alice')
alice.update_is_closed(True)
alice.update_is_closed(False)
assert not alice.is_closed
assert not Participant.from_username('alice').is_closed
def test_uic_uses_supplied_cursor(self):
alice = self.make_participant('alice')
with self.db.get_cursor() as cursor:
alice.update_is_closed(True, cursor)
assert alice.is_closed
assert not Participant.from_username('alice').is_closed
assert Participant.from_username('alice').is_closed
| 2.046875 | 2 |
example/xmlrpc/rpcserver.py | tirkarthi/python-sensor | 61 | 12788504 | <gh_stars>10-100
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2019
from xmlrpc.server import SimpleXMLRPCServer
import opentracing
def dance(payload, carrier):
ctx = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, carrier)
with opentracing.tracer.start_active_span('RPCServer', child_of=ctx) as scope:
scope.span.set_tag("span.kind", "entry")
scope.span.set_tag("rpc.call", "dance")
scope.span.set_tag("rpc.host", "rpc-api.instana.com:8261")
return "♪┏(°.°)┛┗(°.°)┓%s┗(°.°)┛┏(°.°)┓ ♪" % str(payload)
server = SimpleXMLRPCServer(("localhost", 8261))
print("Listening on port 8261...")
server.register_function(dance, "dance")
server.serve_forever() | 2.453125 | 2 |
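# Hedged companion sketch (not part of the original server file): a client that
# calls the `dance` endpoint above, injecting the active span context into the
# `carrier` dict so the server-side extract() can continue the trace. The
# operation name and payload string are illustrative assumptions.
import xmlrpc.client
import opentracing
with opentracing.tracer.start_active_span('RPCClient') as scope:
    scope.span.set_tag("span.kind", "exit")
    carrier = {}
    opentracing.tracer.inject(scope.span.context, opentracing.Format.HTTP_HEADERS, carrier)
    proxy = xmlrpc.client.ServerProxy("http://localhost:8261")
    print(proxy.dance("hello", carrier))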
tmhmm/__init__.py | nicolagulmini/tmhmm.py | 25 | 12788505 | from tmhmm.api import predict
__all__ = ['predict']
| 1.054688 | 1 |
writer/newyork30.py | rayidghani/esp32 | 0 | 12788506 | <gh_stars>0
# Code generated by font_to_py.py.
# Font: NewYork.ttf
# Cmd: ../../../micropython-font-to-py/font_to_py.py -x /System/Library/Fonts/NewYork.ttf 30 newyork30.py
version = '0.33'
def height():
return 30
def baseline():
return 23
def max_width():
return 29
def hmap():
return True
def reverse():
return False
def monospaced():
return False
def min_ch():
return 32
def max_ch():
return 126
_font =\
b'\x09\x00\x00\x00\x00\x00\x38\x00\x7c\x00\x7e\x00\x07\x00\x03\x00'\
b'\x01\x00\x01\x00\x01\x00\x03\x00\x06\x00\x1e\x00\x1c\x00\x18\x00'\
b'\x20\x00\x10\x00\x00\x00\x00\x00\x00\x00\x38\x00\x38\x00\x38\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00'\
b'\x00\x00\x38\x38\x38\x38\x38\x38\x38\x38\x10\x10\x10\x10\x10\x10'\
b'\x10\x00\x00\x00\x38\x38\x38\x00\x00\x00\x00\x00\x00\x00\x0c\x00'\
b'\x00\x00\x00\x00\x39\xc0\x39\xc0\x39\xc0\x39\xc0\x39\xc0\x10\x80'\
b'\x10\x80\x10\x80\x10\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10\x04\x10'\
b'\x7f\xfe\x04\x10\x08\x10\x08\x10\x08\x20\x08\x20\x08\x20\x7f\xfe'\
b'\x08\x20\x08\x20\x08\x20\x08\x20\x08\x20\x08\x20\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x02\x00\x02\x00'\
b'\x02\x00\x0f\xc0\x3a\x70\x32\x30\x72\x10\x72\x10\x72\x00\x7a\x00'\
b'\x3e\x00\x3f\x00\x1f\xc0\x07\xe0\x03\xf0\x02\x78\x02\x38\x02\x38'\
b'\x42\x38\x42\x38\x62\x70\x72\xe0\x1f\x80\x02\x00\x02\x00\x02\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00'\
b'\x1e\x00\x80\x33\x00\x80\x21\x01\x00\x61\x81\x00\x61\x82\x00\x61'\
b'\x82\x00\x61\x84\x00\x61\x84\x00\x21\x08\x00\x33\x10\x00\x1e\x10'\
b'\xf0\x00\x21\x98\x00\x21\x08\x00\x43\x0c\x00\x43\x0c\x00\x83\x0c'\
b'\x00\x83\x0c\x01\x03\x0c\x01\x01\x08\x02\x01\x98\x02\x00\xf0\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x03\xe0\x00\x0e'\
b'\x38\x00\x0c\x1c\x00\x1c\x1c\x00\x1c\x1c\x00\x1c\x1c\x00\x1e\x18'\
b'\x00\x0f\x30\x00\x0f\xe0\x00\x07\xc0\xf0\x07\xe0\x40\x19\xf0\x40'\
b'\x38\xf8\x80\x30\x7d\x00\x70\x3e\x00\x70\x1f\x00\x70\x0f\x80\x78'\
b'\x07\xc0\x38\x1b\xe0\x1e\x20\xf0\x0f\xc0\x78\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\x00\x00\x00\x38\x38\x38\x38\x38\x10\x10\x10\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x80\x01\x00\x06\x00\x04\x00\x0c\x00\x18\x00\x18\x00'\
b'\x38\x00\x38\x00\x30\x00\x70\x00\x70\x00\x70\x00\x70\x00\x70\x00'\
b'\x70\x00\x70\x00\x70\x00\x30\x00\x38\x00\x38\x00\x18\x00\x0c\x00'\
b'\x0c\x00\x06\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x80\x00\x40\x00\x20\x00\x10\x00\x18\x00\x1c\x00\x0c\x00\x0e\x00'\
b'\x0e\x00\x06\x00\x07\x00\x07\x00\x07\x00\x07\x00\x07\x00\x07\x00'\
b'\x07\x00\x07\x00\x06\x00\x0e\x00\x0e\x00\x0c\x00\x18\x00\x18\x00'\
b'\x30\x00\x20\x00\x40\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00'\
b'\x00\x00\x0c\x00\x0c\x00\x69\x80\x7b\x80\x0c\x00\x37\x00\x6b\x80'\
b'\x4d\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00'\
b'\x01\x00\x01\x00\x01\x00\x01\x00\x7f\xfc\x01\x00\x01\x00\x01\x00'\
b'\x01\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x70'\
b'\x30\x30\x10\x20\x40\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x70\x70\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x00'\
b'\x02\x00\x02\x00\x02\x00\x02\x00\x04\x00\x04\x00\x04\x00\x04\x00'\
b'\x08\x00\x08\x00\x08\x00\x18\x00\x10\x00\x10\x00\x10\x00\x20\x00'\
b'\x20\x00\x20\x00\x20\x00\x40\x00\x40\x00\x40\x00\x40\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x03\xc0\x0c\x70\x18\x38'\
b'\x38\x18\x38\x1c\x30\x1c\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x70\x0e'\
b'\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x30\x0c\x38\x1c\x18\x1c\x1c\x18'\
b'\x0e\x30\x03\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0b\x00\x00\x00\x00\x00\x06\x00\x1e\x00\x2e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x7f\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x0f\x80\x1f\xe0\x21\xe0\x40\xf0\x00\x70'\
b'\x00\x70\x00\x70\x00\x70\x00\x60\x00\xe0\x00\xc0\x01\x80\x01\x80'\
b'\x03\x00\x06\x00\x0c\x00\x18\x00\x10\x00\x20\x00\x7f\xf0\x7f\xf0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x07\xc0\x1f\xe0\x30\xf0\x40\x70\x00\x70\x00\x70'\
b'\x00\x70\x00\xe0\x01\x80\x0f\x80\x01\xe0\x00\xf0\x00\x70\x00\x38'\
b'\x00\x38\x70\x38\x70\x38\x70\x30\x20\x70\x30\xe0\x0f\x80\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x18\x00\x38\x00\x78\x00\xb8\x00\xb8\x01\x38\x02\x38'\
b'\x02\x38\x04\x38\x04\x38\x08\x38\x10\x38\x10\x38\x20\x38\x7f\xff'\
b'\x7f\xff\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00'\
b'\x0f\xf0\x0f\xf0\x10\x00\x10\x00\x10\x00\x10\x00\x20\x00\x20\x00'\
b'\x27\xc0\x38\xe0\x20\x70\x00\x78\x00\x38\x00\x38\x00\x38\x70\x38'\
b'\x70\x38\x70\x70\x60\x70\x30\xe0\x0f\x80\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x78'\
b'\x01\xc0\x03\x00\x0e\x00\x0c\x00\x1c\x00\x38\x00\x38\x00\x38\x00'\
b'\x73\xe0\x74\x70\x78\x38\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x30\x1c'\
b'\x38\x18\x18\x38\x0c\x70\x07\xc0\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x7f\xf0\x7f\xf0'\
b'\x00\x10\x00\x20\x00\x20\x00\x60\x00\x40\x00\xc0\x00\x80\x00\x80'\
b'\x01\x80\x01\x00\x03\x00\x03\x00\x06\x00\x06\x00\x0e\x00\x0e\x00'\
b'\x0c\x00\x1c\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x0f\xe0\x18\x78\x30\x38'\
b'\x70\x1c\x70\x1c\x70\x1c\x78\x18\x3c\x38\x1f\x60\x0f\xc0\x03\xf0'\
b'\x1c\xf8\x38\x3c\x30\x1e\x70\x0e\x70\x0e\x70\x0e\x70\x0e\x38\x1c'\
b'\x1c\x38\x07\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x00\x00\x00\x00\x00\x07\xc0\x1c\x60\x38\x30\x30\x38'\
b'\x70\x18\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x38\x3c\x3c\x5c\x0f\x9c'\
b'\x00\x38\x00\x38\x00\x38\x00\x70\x00\x60\x00\xe0\x01\x80\x07\x00'\
b'\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00\x00\x00\x00\x00'\
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x70\x70\x00\x00'\
b'\x00\x00\x00\x00\x00\x70\x70\x30\x10\x20\x20\x40\x00\x00\x00\x00'\
b'\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x04\x00\x18\x00\x60\x01\x80\x06\x00\x18\x00\x60\x00'\
b'\x30\x00\x0c\x00\x03\x00\x00\x80\x00\x60\x00\x18\x00\x04\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x3f\xfc\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00'\
b'\x30\x00\x0c\x00\x03\x00\x00\xc0\x00\x30\x00\x0c\x00\x18\x00\x20'\
b'\x00\xc0\x03\x00\x0c\x00\x30\x00\x40\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x38\x00\x7c\x00\x7e\x00\x07\x00\x03\x00\x01\x00\x01\x00\x01\x00'\
b'\x03\x00\x06\x00\x1e\x00\x1c\x00\x18\x00\x20\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x38\x00\x38\x00\x38\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\xc0\x00\x00\xe0\x70\x00\x01\x00'\
b'\x08\x00\x06\x00\x04\x00\x0c\x00\x02\x00\x08\x00\x01\x00\x10\x1e'\
b'\x21\x00\x30\x39\x60\x80\x20\x70\xc0\x80\x20\xe0\xc0\x80\x40\xe0'\
b'\xc0\x80\x40\xe0\xc0\x80\x41\xc1\xc0\x80\x41\xc1\x80\x80\x41\xc1'\
b'\x81\x00\x41\xc1\x81\x00\x41\xc3\x82\x00\x41\xc3\x86\x00\x20\xed'\
b'\x8c\x00\x20\x70\xf0\x00\x30\x00\x00\x00\x10\x00\x00\x00\x08\x00'\
b'\x00\x00\x06\x00\x00\x00\x03\x81\x80\x00\x00\xfe\x00\x00\x00\x00'\
b'\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\xc0\x00'\
b'\x00\xe0\x00\x01\xe0\x00\x01\xe0\x00\x02\x70\x00\x02\x70\x00\x02'\
b'\x70\x00\x04\x38\x00\x04\x38\x00\x04\x38\x00\x08\x1c\x00\x08\x1c'\
b'\x00\x0f\xfc\x00\x10\x0e\x00\x10\x0e\x00\x10\x0e\x00\x20\x07\x00'\
b'\x20\x07\x00\x60\x07\x80\xf8\x1f\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00'\
b'\x00\x00\x00\x00\x00\x00\x7f\xf0\x00\x1c\x1c\x00\x1c\x0e\x00\x1c'\
b'\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x06\x00\x1c\x0e'\
b'\x00\x1c\x18\x00\x1f\xf8\x00\x1c\x1e\x00\x1c\x07\x00\x1c\x03\x80'\
b'\x1c\x03\x80\x1c\x03\x80\x1c\x03\x80\x1c\x03\x00\x1c\x07\x00\x1c'\
b'\x1c\x00\x7f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\xfe\x00\x07\x03\x80\x0e\x01\xc0\x1c\x00\xc0\x38\x00'\
b'\x40\x38\x00\x40\x30\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00'\
b'\x70\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00\x38\x00\x00\x38'\
b'\x00\x40\x1c\x00\x40\x1c\x00\xc0\x0e\x01\xc0\x03\x83\x80\x00\xfe'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x7f\xf0'\
b'\x00\x1c\x1e\x00\x1c\x07\x00\x1c\x03\xc0\x1c\x01\xc0\x1c\x00\xe0'\
b'\x1c\x00\xe0\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c'\
b'\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\xe0\x1c\x00\xe0\x1c\x01'\
b'\xc0\x1c\x03\x80\x1c\x07\x00\x1c\x1c\x00\x7f\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x7f\xfe\x00\x1c\x0e\x00'\
b'\x1c\x02\x00\x1c\x02\x00\x1c\x02\x00\x1c\x00\x00\x1c\x00\x00\x1c'\
b'\x08\x00\x1c\x08\x00\x1c\x18\x00\x1f\xf8\x00\x1c\x18\x00\x1c\x08'\
b'\x00\x1c\x00\x00\x1c\x00\x00\x1c\x01\x00\x1c\x01\x00\x1c\x01\x00'\
b'\x1c\x03\x00\x1c\x07\x00\x7f\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00'\
b'\x00\x00\x00\x00\x7f\xfe\x1c\x0e\x1c\x06\x1c\x02\x1c\x02\x1c\x00'\
b'\x1c\x00\x1c\x10\x1c\x10\x1c\x10\x1f\xf0\x1c\x10\x1c\x10\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x7f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x00\x07\x07\x80\x0e\x01\x80\x1c\x00\x80'\
b'\x38\x00\x80\x38\x00\x80\x30\x00\x00\x70\x00\x00\x70\x00\x00\x70'\
b'\x00\x00\x70\x00\x00\x70\x07\xf0\x70\x01\xc0\x70\x01\xc0\x38\x01'\
b'\xc0\x38\x01\xc0\x18\x01\xc0\x1c\x01\xc0\x0e\x01\xc0\x03\x83\x80'\
b'\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00'\
b'\x7f\x03\xf8\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c'\
b'\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1f\xff'\
b'\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0'\
b'\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x1c\x00\xe0\x7f\x03\xf8\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x7f\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0a\x00\x00\x00\x00\x00\x3f\x80\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0c\x00\x1c\x00\x18\x00\x18\x00\x20\x00\x40\x00\x00\x00\x00\x00'\
b'\x12\x00\x00\x00\x00\x00\x00\x00\x7f\x07\xc0\x1c\x03\x00\x1c\x02'\
b'\x00\x1c\x04\x00\x1c\x08\x00\x1c\x10\x00\x1c\x10\x00\x1c\x20\x00'\
b'\x1c\x40\x00\x1c\xe0\x00\x1f\xe0\x00\x1c\xf0\x00\x1c\x78\x00\x1c'\
b'\x38\x00\x1c\x3c\x00\x1c\x1c\x00\x1c\x1e\x00\x1c\x0f\x00\x1c\x07'\
b'\x00\x1c\x07\x80\x7f\x03\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x7f\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x02\x1c\x02\x1c\x06\x1c\x06\x1c\x0e\x7f\xfe\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7e\x00\x3f\x00\x1e\x00\x3c\x00\x1e\x00\x3c\x00'\
b'\x17\x00\x5c\x00\x17\x00\x5c\x00\x17\x00\x5c\x00\x13\x80\x9c\x00'\
b'\x13\x80\x9c\x00\x13\x80\x9c\x00\x11\xc1\x1c\x00\x11\xc1\x1c\x00'\
b'\x10\xe2\x1c\x00\x10\xe2\x1c\x00\x10\xe2\x1c\x00\x10\x74\x1c\x00'\
b'\x10\x74\x1c\x00\x10\x74\x1c\x00\x10\x38\x1c\x00\x10\x38\x1c\x00'\
b'\x10\x38\x1c\x00\x7c\x10\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\xf8\x03\xe0\x3c'\
b'\x00\x80\x3e\x00\x80\x2e\x00\x80\x2f\x00\x80\x27\x80\x80\x23\x80'\
b'\x80\x23\xc0\x80\x21\xe0\x80\x20\xe0\x80\x20\xf0\x80\x20\x70\x80'\
b'\x20\x78\x80\x20\x3c\x80\x20\x1c\x80\x20\x1e\x80\x20\x0f\x80\x20'\
b'\x07\x80\x20\x07\x80\x20\x03\x80\xf8\x01\x80\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x16\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x03\x07\x00\x0e\x01'\
b'\xc0\x1c\x00\xe0\x18\x00\x60\x38\x00\x70\x30\x00\x70\x70\x00\x38'\
b'\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70'\
b'\x00\x38\x38\x00\x30\x38\x00\x70\x18\x00\x60\x1c\x00\xe0\x0e\x01'\
b'\xc0\x03\x83\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\
b'\x00\x00\x00\x00\x7f\xf0\x00\x1c\x3c\x00\x1c\x0e\x00\x1c\x0e\x00'\
b'\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07\x00\x1c'\
b'\x0e\x00\x1c\x0e\x00\x1c\x38\x00\x1f\xe0\x00\x1c\x00\x00\x1c\x00'\
b'\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00'\
b'\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xfc\x00\x03\x07\x00\x0e\x01\xc0\x1c\x00\xe0\x18\x00\x60\x38'\
b'\x00\x70\x30\x00\x70\x70\x00\x38\x70\x00\x38\x70\x00\x38\x70\x00'\
b'\x38\x70\x00\x38\x70\x00\x38\x70\x00\x38\x38\x00\x30\x38\x00\x70'\
b'\x18\x00\x60\x1c\x00\xe0\x0e\x01\xc0\x03\x83\x00\x00\xfc\x00\x00'\
b'\x1e\x00\x00\x0f\x00\x00\x07\xc0\x00\x01\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x7f\xf0\x00\x1c'\
b'\x3c\x00\x1c\x0e\x00\x1c\x0f\x00\x1c\x07\x00\x1c\x07\x00\x1c\x07'\
b'\x00\x1c\x07\x00\x1c\x06\x00\x1c\x0e\x00\x1c\x38\x00\x1f\xf0\x00'\
b'\x1c\x78\x00\x1c\x38\x00\x1c\x3c\x00\x1c\x1c\x00\x1c\x0e\x00\x1c'\
b'\x0e\x00\x1c\x07\x00\x1c\x07\x00\x7f\x03\xc0\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0f\x00\x00\x00\x00\x00\x07\xe0\x1c\x38\x38\x18\x70\x08\x70\x08'\
b'\x70\x08\x78\x00\x7c\x00\x3e\x00\x1f\xc0\x0f\xf0\x01\xf8\x00\x78'\
b'\x00\x3c\x00\x1c\x00\x1c\x40\x1c\x40\x18\x60\x38\x78\x70\x0f\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\
b'\x00\x00\x00\x00\x00\x00\xff\xff\x80\xe1\xc3\x80\xc1\xc1\x80\x81'\
b'\xc0\x80\x81\xc0\x80\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0'\
b'\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00'\
b'\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00\x01'\
b'\xc0\x00\x07\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00'\
b'\x00\x00\xfe\x03\xe0\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00'\
b'\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80'\
b'\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38\x00\x80\x38'\
b'\x00\x80\x1c\x01\x00\x1c\x01\x00\x0e\x02\x00\x07\x04\x00\x01\xf8'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\xfe\x07'\
b'\xc0\x78\x01\x80\x38\x01\x00\x38\x01\x00\x1c\x02\x00\x1c\x02\x00'\
b'\x1c\x02\x00\x0e\x04\x00\x0e\x04\x00\x0e\x04\x00\x07\x08\x00\x07'\
b'\x08\x00\x07\x08\x00\x03\x90\x00\x03\x90\x00\x03\x90\x00\x01\xe0'\
b'\x00\x01\xe0\x00\x01\xc0\x00\x00\xc0\x00\x00\xc0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x0f\xe0\xf8'\
b'\x3c\x03\x80\x60\x3c\x03\x80\x60\x1c\x03\x80\x40\x1c\x05\xc0\x40'\
b'\x1c\x05\xc0\x40\x0e\x05\xc0\x80\x0e\x08\xc0\x80\x0e\x08\xe0\x80'\
b'\x07\x08\xe1\x00\x07\x08\x61\x00\x07\x10\x71\x00\x07\x10\x72\x00'\
b'\x03\x90\x72\x00\x03\xa0\x32\x00\x03\xa0\x3a\x00\x01\xe0\x3c\x00'\
b'\x01\xc0\x3c\x00\x01\xc0\x1c\x00\x00\xc0\x18\x00\x00\xc0\x18\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\x07\xc0\x3c\x03\x00\x1c\x02\x00\x1e\x02\x00'\
b'\x0e\x04\x00\x0f\x08\x00\x07\x08\x00\x03\x90\x00\x03\xe0\x00\x01'\
b'\xe0\x00\x00\xe0\x00\x01\xe0\x00\x02\x70\x00\x02\x78\x00\x04\x38'\
b'\x00\x04\x1c\x00\x08\x1c\x00\x10\x0e\x00\x10\x0f\x00\x30\x07\x00'\
b'\xf8\x1f\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00'\
b'\xff\x07\xe0\x3c\x01\x80\x1c\x01\x00\x0e\x02\x00\x0e\x02\x00\x07'\
b'\x04\x00\x07\x04\x00\x03\x88\x00\x03\xd0\x00\x01\xd0\x00\x00\xe0'\
b'\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00'\
b'\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x03\xf8\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x3f\xff\x00\x38'\
b'\x0f\x00\x20\x0e\x00\x20\x1c\x00\x00\x3c\x00\x00\x38\x00\x00\x78'\
b'\x00\x00\x70\x00\x00\xe0\x00\x01\xe0\x00\x01\xc0\x00\x03\xc0\x00'\
b'\x03\x80\x00\x07\x80\x00\x0f\x00\x00\x0e\x00\x00\x1e\x01\x00\x1c'\
b'\x01\x00\x3c\x03\x00\x78\x07\x00\x7f\xff\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0a\x00\x1f\x80\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00'\
b'\x1c\x00\x1c\x00\x1c\x00\x1f\x80\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x40\x00\x40\x00\x40\x00\x40\x00\x20\x00\x20\x00\x20\x00\x20\x00'\
b'\x10\x00\x10\x00\x10\x00\x10\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x04\x00\x04\x00\x04\x00\x04\x00\x02\x00\x02\x00\x02\x00\x02\x00'\
b'\x01\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x7e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00\x0e\x00'\
b'\x0e\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00'\
b'\x01\x00\x02\x80\x02\x40\x04\x40\x08\x20\x08\x10\x10\x10\x20\x08'\
b'\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfc\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x06\x00\x06\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x30\xe0\x70\x70\x70\x70'\
b'\x00\x70\x00\x70\x07\xf0\x18\x70\x30\x70\x70\x70\x70\x70\x70\x70'\
b'\x38\xf0\x1f\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x10\x00\x00\x00\x00\x00\xf8\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x39\xf0\x3e\x38\x38\x1c\x38\x1c\x38\x0e'\
b'\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x1c\x38\x1c\x34\x38'\
b'\x23\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\xc0\x1c\x70\x38\x70\x38\x70\x70\x00\x70\x00'\
b'\x70\x00\x70\x00\x70\x00\x70\x00\x38\x00\x3c\x00\x1e\x10\x07\xe0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'\
b'\x00\x00\x00\x00\x00\x7c\x00\x1c\x00\x1c\x00\x1c\x00\x1c\x00\x1c'\
b'\x00\x1c\x07\xdc\x1c\x3c\x38\x1c\x38\x1c\x70\x1c\x70\x1c\x70\x1c'\
b'\x70\x1c\x70\x1c\x70\x1c\x38\x1c\x38\x1c\x1c\x7c\x0f\x9f\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\xc0\x1c\x70\x38\x30\x30\x38\x70\x38\x7f\xf8\x70\x00\x70\x00'\
b'\x70\x00\x78\x00\x38\x00\x3c\x00\x1e\x10\x07\xe0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x0f\x00\x1f\x80\x3b\x80\x38\x00\x38\x00\x38\x00\x38\x00\x7f\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\xfc\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xfe\x1c\x60'\
b'\x38\x30\x38\x38\x38\x38\x38\x38\x18\x30\x0c\x70\x03\xc0\x1c\x00'\
b'\x30\x00\x30\x00\x3f\xf0\x1f\xf8\x18\x1c\x30\x0c\x70\x0c\x70\x0c'\
b'\x70\x18\x3c\x30\x0f\xc0\x10\x00\x00\x00\x00\x00\xf8\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x39\xf0\x3e\x38\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\xfe\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00\x78'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x7e\x00\x00\x00'\
b'\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x38\x38\x38\x00\x00\x00'\
b'\x78\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x30'\
b'\x30\x60\x40\x80\x0f\x00\x00\x00\x00\x00\xf8\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x7c\x38\x30\x38\x20\x38\x40'\
b'\x38\x80\x39\x80\x3f\x80\x39\xc0\x38\xe0\x38\xe0\x38\x70\x38\x70'\
b'\x38\x38\xfc\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x07\x00\x00\x00\xf8\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9'\
b'\xe1\xe0\x3e\x76\x70\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38\x38'\
b'\x38\x38\x38\x38\x38\x38\xfc\x7c\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf9\xf0\x3e\x38\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\xfe\x7f\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x07\xe0\x0c\x30\x18\x1c\x38\x1c\x70\x0e\x70\x0e\x70\x0e\x70\x0e'\
b'\x70\x0e\x70\x0e\x38\x1c\x38\x18\x1c\x30\x07\xe0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9\xf0'\
b'\x3e\x38\x38\x1c\x38\x1c\x38\x0e\x38\x0e\x38\x0e\x38\x0e\x38\x0e'\
b'\x38\x0e\x38\x1c\x38\x1c\x3c\x38\x3b\xe0\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\xfe\x00\x10\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xc4\x1c\x2c'\
b'\x38\x1c\x38\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c\x70\x1c'\
b'\x38\x1c\x38\x1c\x1c\x7c\x0f\x9c\x00\x1c\x00\x1c\x00\x1c\x00\x1c'\
b'\x00\x1c\x00\x1c\x00\x7f\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9\xc0\x3b\xc0\x3c\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x30\xc0\x60\x40\x60\x40'\
b'\x70\x00\x78\x00\x3f\x00\x07\xc0\x01\xe0\x00\x60\x40\x60\x60\x60'\
b'\x70\xc0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x08\x00\x18\x00\x38\x00\x7f\x80\x38\x00\x38\x00\x38\x00\x38\x00'\
b'\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x38\x00\x1c\x00'\
b'\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x38\x1c\x78\x3c\x38\x1c\x38\x1c\x38\x1c\x38\x1c'\
b'\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x38\x1c\x1c\x7c\x0f\x9f'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfe\x7c\x38\x10\x38\x10\x18\x20\x1c\x20\x1c\x20\x0e\x40'\
b'\x0e\x40\x0e\x40\x07\x80\x07\x80\x03\x00\x03\x00\x03\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x7e\x78\x38\x38\x10\x38'\
b'\x38\x20\x38\x5c\x20\x18\x5c\x20\x1c\x4c\x40\x1c\x8e\x40\x0c\x8e'\
b'\x40\x0e\x86\x80\x0f\x07\x80\x07\x07\x80\x07\x03\x00\x06\x03\x00'\
b'\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x78\x38\x30'\
b'\x38\x20\x1c\x40\x0e\x80\x0e\x80\x07\x00\x07\x80\x0b\x80\x09\xc0'\
b'\x10\xe0\x10\xe0\x20\x70\xf1\xfc\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x7c\x78\x10\x38\x10'\
b'\x38\x20\x1c\x20\x1c\x40\x1c\x40\x0e\x40\x0e\x80\x06\x80\x07\x80'\
b'\x07\x00\x03\x00\x02\x00\x02\x00\x02\x00\x04\x00\x04\x00\x0c\x00'\
b'\x08\x00\x18\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xe0\x60\xe0\x41\xc0\x41\x80'\
b'\x03\x80\x07\x00\x07\x00\x0e\x00\x0c\x00\x1c\x00\x18\x20\x38\x20'\
b'\x70\x60\x7f\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x07\x00\x0c\x00\x18\x00\x18\x00\x18\x00\x18\x00'\
b'\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x30\x00\xc0\x00'\
b'\x20\x00\x10\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00\x18\x00'\
b'\x18\x00\x18\x00\x18\x00\x0c\x00\x07\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x70\x00\x18\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00'\
b'\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x06\x00\x01\x80\x02\x00\x04\x00'\
b'\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00\x0c\x00'\
b'\x0c\x00\x18\x00\x70\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x1c\x04\x22\x04\x41\x04\x40\x88\x40\x70'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_index =\
b'\x00\x00\x3e\x00\x5e\x00\x7e\x00\xbc\x00\xfa\x00\x38\x01\x94\x01'\
b'\xf0\x01\x10\x02\x4e\x02\x8c\x02\xca\x02\x08\x03\x28\x03\x66\x03'\
b'\x86\x03\xc4\x03\x02\x04\x40\x04\x7e\x04\xbc\x04\xfa\x04\x38\x05'\
b'\x76\x05\xb4\x05\xf2\x05\x30\x06\x50\x06\x70\x06\xae\x06\xec\x06'\
b'\x2a\x07\x68\x07\xe2\x07\x3e\x08\x9a\x08\xf6\x08\x52\x09\xae\x09'\
b'\xec\x09\x48\x0a\xa4\x0a\xe2\x0a\x20\x0b\x7c\x0b\xba\x0b\x34\x0c'\
b'\x90\x0c\xec\x0c\x48\x0d\xa4\x0d\x00\x0e\x3e\x0e\x9a\x0e\xf6\x0e'\
b'\x52\x0f\xcc\x0f\x28\x10\x84\x10\xe0\x10\x1e\x11\x5c\x11\x9a\x11'\
b'\xd8\x11\x16\x12\x54\x12\x92\x12\xd0\x12\x0e\x13\x4c\x13\x8a\x13'\
b'\xc8\x13\x06\x14\x44\x14\x64\x14\x84\x14\xc2\x14\xe2\x14\x3e\x15'\
b'\x7c\x15\xba\x15\xf8\x15\x36\x16\x74\x16\xb2\x16\xf0\x16\x2e\x17'\
b'\x6c\x17\xc8\x17\x06\x18\x44\x18\x82\x18\xc0\x18\xfe\x18\x3c\x19'\
b'\x7a\x19'
_mvfont = memoryview(_font)
_mvi = memoryview(_index)
# Read a little-endian 16-bit value from the start of a buffer slice.
ifb = lambda l : l[0] | (l[1] << 8)

def get_ch(ch):
    oc = ord(ch)
    # Printable ASCII (32..126) maps to index entries 1..95; anything else
    # falls back to entry 0, the default glyph at the start of _font.
    ioff = 2 * (oc - 32 + 1) if oc >= 32 and oc <= 126 else 0
    doff = ifb(_mvi[ioff : ])       # offset of the glyph record in _font
    width = ifb(_mvfont[doff : ])   # first two bytes of the record: glyph width in pixels
    # The bitmap that follows holds 30 rows of ((width - 1)//8 + 1) bytes each.
    next_offs = doff + 2 + ((width - 1)//8 + 1) * 30
    return _mvfont[doff + 2:next_offs], 30, width   # (bitmap, height, width)
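# Minimal usage sketch (the display/driver integration is assumed and not part
# of this file): fetch one glyph and derive its row stride.
#
#     glyph, height, width = get_ch('A')
#     bytes_per_row = (width - 1)//8 + 1   # height is always 30 for this font
#     # 'glyph' is a memoryview of height * bytes_per_row bitmap bytes.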
| 2.203125 | 2 |
emma/interface/management/commands/set_copyright.py | djangowebstudio/emma | 0 | 12788507 | <reponame>djangowebstudio/emma
from django.core.management.base import BaseCommand, CommandError, NoArgsCommand
from emma.interface.models import *
import os, sys
from optparse import make_option
class Command(BaseCommand):
help = """
Set all copyrights to a certain value. Will optionally filter on category and / or directory.
"""
args = "copyright [yes|no], category [photo|illustration], directory, action"
option_list = BaseCommand.option_list + (
make_option('-r', '--for-real',
action='store_true',
dest='action',
default=False,
help='Do the action.'),
make_option('-c', '--copyright',
action='store',
dest='copyright',
default='yes',
help='Enter a string.'),
make_option('-g', '--group',
action='store',
dest='category',
default='illustration',
type='string',
help='Enter a category (called group here).'),
make_option('-d', '--dir',
action='store',
dest='directory',
default='',
type='string',
help='Enter a directory.'),
)
def handle(self, *args, **options):
action = options.get('action', False)
copyright = options.get('copyright', 'yes')
category = options.get('category', 'illustration')
directory = options.get('directory', '')
print 'acting on category %s' % category
        if not copyright:
            sys.stderr.write(self.style.ERROR('Please enter a copyright. (-c, --copyright [yes|no])') + '\n')
            sys.exit(1)
        copyright = True if copyright == 'yes' else False
        if not category:
            sys.stderr.write(self.style.ERROR('Please enter a category (-g, --group [photo|illustration]).') + '\n')
            sys.exit(1)
if directory and category:
m = Metadata.objects.filter(image__image_category=category, image__image_real_path__icontains=directory)
elif directory and not category:
m = Metadata.objects.filter(image__image_real_path__icontains=directory)
else:
m = Metadata.objects.filter(image__image_category=category)
for item in m:
print 'image: %s | copyright: %s | category: %s' % (item.image_LNID, item.copyright, item.image.image_category)
if action:
item.copyright = copyright
try:
item.save()
print 'saved %s' % copyright
try:
k = Keyword.objects.get(image_LNID=item.image_LNID)
k.copyright = copyright
k.save()
print 'saved %s ' % copyright
                    except Exception, inst:
                        # style.ERROR expects a string, so stringify the exception first.
                        sys.stderr.write(self.style.ERROR(str(inst)) + '\n')
                except Exception, inst:
                    sys.stderr.write(self.style.ERROR(str(inst)) + '\n')
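# Example invocation (app layout and values are illustrative; run from the
# project root once this command lives in an app's management/commands dir):
#   python manage.py set_copyright --group photo --dir archive/2009 --copyright no --for-real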
| 2.1875 | 2 |
DynamicPortfolioBuilder/Files/FetchData.py | shiv-24/PFbuilder | 0 | 12788508 | <gh_stars>0
import Files.ConnectMongo as MC
client = MC.getMongoClient('127.0.0.1:27017')
# dbCur = MC.getDataFindWithFiltersAndCustomFields(client,'market_data','stock_data',{'Ticker':'AAPL'},{'CompanyName':1,'Date':1,'_id':0},[('Date',1)])
# for docs in dbCur:
# print(docs)
# disStockVal = MC.getDistinctValues(client,'market_data','stock_data',"Ticker")
# disStockVal = MC.getDistinctValues(client,'market_data','benchmark_data',"BenchmarkTicker")
data = []
def findDataWithFilters_CustomFields(db_name,coll_name,filters,requiredFields,order):
data = MC.getDataFindWithFiltersAndCustomFields(client,db_name,coll_name,filters,requiredFields,order)
return data
def findDataWithFilters_AllFields(db_name,coll_name,filters,order):
data = MC.getDataFindWithFiltersAllFields(client,db_name,coll_name,filters,order)
return data
def findDataWithoutFilters(db_name,coll_name,order):
data = MC.getDataFindWithoutFilters(client,db_name,coll_name,order)
return data
def getDistinctValues(db_name,coll_name,attributes):
data = MC.getDistinctValues(client,db_name,coll_name,attributes)
return data | 2.34375 | 2 |
lr_prediction.py | taskera/Altran | 0 | 12788509 | <filename>lr_prediction.py
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 13:30:11 2019
@author: <EMAIL>
"""
""" Importing all the libraries and the dataset"""
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from imblearn.over_sampling import SMOTE
import statsmodels.api as sm
# Importing the dataset
dataset = pd.read_csv('InputData.csv')
X = dataset.loc[:, ['age','lifestyle','zip code', 'family status', 'car', 'sports', 'earnings', 'Living area']].values
y = dataset.loc[:, ['label']].values
""" Data exploration """
# Analysis of the dependent variable -> Imbalance
dataset.groupby('label').mean()
dataset.groupby(['label','lifestyle']).count()
dataset.groupby(['label','family status']).count()
dataset.groupby(['label','car']).count()
dataset.groupby(['label','sports']).count()
dataset.groupby(['label','Living area']).count()
#Plots of the imbalance dependant variable
dataset['label'].value_counts()
x = ['Response', 'No response']
count_no_res = len(dataset[dataset['label']=='no response'])
count_res = len(dataset[dataset['label']=='response'])
lif = [count_res, count_no_res]
plt.subplots(figsize=(8, 8))
plt.bar(x, lif, label = 'Response')
plt.title('Response Frequency', fontsize=18)
plt.xlabel('Label', fontsize=16)
plt.ylabel('Count', fontsize=16)
plt.show()
pct_of_no_res = count_no_res/(count_no_res+count_res)
print("percentage of no response is", pct_of_no_res*100)
pct_of_res = count_res/(count_no_res+count_res)
print("percentage of response", pct_of_res*100)
# Plot of the age for each labels
x = ['15', '20', '25', '30', '35', '40', '45', '50', '55', '60', '65', '70']
out = pd.cut(dataset.age, bins=list(range(15, 80, 5)), labels = x, include_lowest=True)
liv_tot = out.value_counts().sort_index()
liv_res = out[dataset.label=='response'].value_counts()
liv_res = 100*liv_res.sort_index().values/liv_tot.values
liv_nores = out[dataset.label=='no response'].value_counts()
liv_nores = 100*liv_nores.sort_index().values/liv_tot.values
plt.subplots(figsize=(8, 8))
plt.bar(x, liv_res, label = 'Response')
plt.bar(x, liv_nores, bottom=liv_res, label = 'No response')
plt.title('Response vs Age', fontsize=18)
plt.xlabel('Age', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the earnings for each labels
x = ['20000', '30000', '40000', '50000', '60000', '70000', '80000', '90000', '100000', '110000', '120000', '130000', '140000']
out = pd.cut(dataset.earnings, bins=list(range(20000, 160000,10000)), labels = x, include_lowest=True)
liv_tot = out.value_counts().sort_index()
liv_res = out[dataset.label=='response'].value_counts()
liv_res = 100*liv_res.sort_index().values/liv_tot.values
liv_nores = out[dataset.label=='no response'].value_counts()
liv_nores = 100*liv_nores.sort_index().values/liv_tot.values
plt.subplots(figsize=(8, 8))
plt.bar(x, liv_res, label = 'Response')
plt.bar(x, liv_nores, bottom=liv_res, label = 'No response')
plt.title('Response vs Earnings', fontsize=18)
plt.xlabel('Earnings', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the Living area for each label
x = ['Urban', 'Rural']
urban_response = 0
urban_noresponse = 0
rural_response = 0
rural_noresponse = 0
for i,j in enumerate(dataset['Living area']):
if j == 'urban' and y[i]=='response':
urban_response += 1
elif j == 'urban' and y[i]=='no response':
urban_noresponse += 1
elif j == 'rural' and y[i]=='response':
rural_response += 1
else:
rural_noresponse += 1
urban_response_100 = 100*urban_response/(urban_response+urban_noresponse)
urban_noresponse_100 = 100*urban_noresponse/(urban_response+urban_noresponse)
rural_response_100 = 100*rural_response/(rural_response+rural_noresponse)
rural_noresponse_100 = 100*rural_noresponse/(rural_response+rural_noresponse)
liv = [[urban_response_100,rural_response_100],[urban_noresponse_100,rural_noresponse_100]]
plt.subplots(figsize=(8, 8))
plt.bar(x, liv[0], label = 'Response')
plt.bar(x, liv[1], bottom=liv[0], label = 'No response')
plt.title('Response vs Living area', fontsize=18)
plt.xlabel('Living area', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the lifestyle for each labels
x = ['Active', 'Cozily', 'Healthy']
active_response = 0
active_noresponse = 0
cozily_response = 0
cozily_noresponse = 0
healthy_response = 0
healthy_noresponse = 0
for i,j in enumerate(dataset['lifestyle']):
if j == 'active' and y[i]=='response':
active_response += 1
elif j == 'active' and y[i]=='no response':
active_noresponse += 1
elif j == 'cozily' and y[i]=='response':
cozily_response += 1
elif j == 'cozily' and y[i]=='no response':
cozily_noresponse += 1
elif j == 'healthy' and y[i]=='response':
healthy_response += 1
else:
healthy_noresponse += 1
active_response_100 = 100*active_response/(active_response+active_noresponse)
active_noresponse_100 = 100*active_noresponse/(active_response+active_noresponse)
cozily_response_100 = 100*cozily_response/(cozily_response+cozily_noresponse)
cozily_noresponse_100 = 100*cozily_noresponse/(cozily_response+cozily_noresponse)
healthy_response_100 = 100*healthy_response/(healthy_response+healthy_noresponse)
healthy_noresponse_100 = 100*healthy_noresponse/(healthy_response+healthy_noresponse)
lif = [[active_response_100,cozily_response_100, healthy_response_100],[active_noresponse_100,cozily_noresponse_100, healthy_noresponse_100]]
plt.subplots(figsize=(8, 8))
plt.bar(x, lif[0], label = 'Response')
plt.bar(x, lif[1], bottom=lif[0], label = 'No response')
plt.title('Response vs Lifestyle', fontsize=18)
plt.xlabel('Lifestyle', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the family status for each labels
x = ['Married', 'Single']
married_response = 0
married_noresponse = 0
single_response = 0
single_noresponse = 0
for i,j in enumerate(dataset['family status']):
if j == 'married' and y[i]=='response':
married_response += 1
elif j == 'married' and y[i]=='no response':
married_noresponse += 1
elif j == 'single' and y[i]=='response':
single_response += 1
else:
single_noresponse += 1
married_response_100 = 100*married_response/(married_response+married_noresponse)
married_noresponse_100 = 100*married_noresponse/(married_response+married_noresponse)
single_response_100 = 100*single_response/(single_response+single_noresponse)
single_noresponse_100 = 100*single_noresponse/(single_response+single_noresponse)
liv = [[married_response_100,single_response_100],[married_noresponse_100,single_noresponse_100]]
plt.subplots(figsize=(8, 8))
plt.bar(x, liv[0], label = 'Response')
plt.bar(x, liv[1], bottom=liv[0], label = 'No response')
plt.title('Response vs Family status', fontsize=18)
plt.xlabel('Family status', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the car type for each labels
x = ['Practical', 'Expensive']
car_response = 0
car_noresponse = 0
car_exp_response = 0
car_exp_noresponse = 0
for i,j in enumerate(dataset['car']):
if j == 'practical' and y[i]=='response':
car_response += 1
elif j == 'practical' and y[i]=='no response':
car_noresponse += 1
elif j == 'expensive' and y[i]=='response':
car_exp_response += 1
else:
car_exp_noresponse += 1
car_response_100 = 100*car_response/(car_response+car_noresponse)
car_noresponse_100 = 100*car_noresponse/(car_response+car_noresponse)
car_exp_response_100 = 100*car_exp_response/(car_exp_response+car_exp_noresponse)
car_exp_noresponse_100 = 100*car_exp_noresponse/(car_exp_response+car_exp_noresponse)
liv = [[car_response_100,car_exp_response_100],[car_noresponse_100,car_exp_noresponse_100]]
plt.subplots(figsize=(8, 8))
plt.bar(x, liv[0], label = 'Response')
plt.bar(x, liv[1], bottom=liv[0], label = 'No response')
plt.title('Response vs Car type', fontsize=18)
plt.xlabel('Car type', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
# Plot of the sport for each labels
x = ['Athletics', 'Soccer', 'Badminton', 'None']
Athletics_response = 0
Athletics_noresponse = 0
Soccer_response = 0
Soccer_noresponse = 0
Badminton_response = 0
Badminton_noresponse = 0
None_response = 0
None_noresponse = 0
for i,j in enumerate(dataset['sports']):
if j == 'athletics' and y[i]=='response':
Athletics_response += 1
elif j == 'athletics' and y[i]=='no response':
Athletics_noresponse += 1
elif j == 'soccer' and y[i]=='response':
Soccer_response += 1
elif j == 'soccer' and y[i]=='no response':
Soccer_noresponse += 1
elif j == 'badminton' and y[i]=='response':
Badminton_response += 1
elif j == 'badminton' and y[i]=='no response':
Badminton_noresponse += 1
elif pd.isnull(j) and y[i]=='response':
None_response += 1
else:
None_noresponse += 1
Athletics_response_100 = 100*Athletics_response/(Athletics_response+Athletics_noresponse)
Athletics_noresponse_100 = 100*Athletics_noresponse/(Athletics_response+Athletics_noresponse)
Soccer_response_100 = 100*Soccer_response/(Soccer_response+Soccer_noresponse)
Soccer_noresponse_100 = 100*Soccer_noresponse/(Soccer_response+Soccer_noresponse)
Badminton_response_100 = 100*Badminton_response/(Badminton_response+Badminton_noresponse)
Badminton_noresponse_100 = 100*Badminton_noresponse/(Badminton_response+Badminton_noresponse)
None_response_100 = 100*None_response/(None_response+None_noresponse)
None_noresponse_100 = 100*None_noresponse/(None_response+None_noresponse)
lif = [[Athletics_response_100,Soccer_response_100, Badminton_response_100, None_response_100],[Athletics_noresponse_100,Soccer_noresponse_100, Badminton_noresponse_100, None_noresponse_100]]
plt.subplots(figsize=(8, 8))
plt.bar(x, lif[0], label = 'Response')
plt.bar(x, lif[1], bottom=lif[0], label = 'No response')
plt.title('Response vs Sport', fontsize=18)
plt.xlabel('Sport', fontsize=16)
plt.ylabel('Percentage of Response', fontsize=16)
plt.legend()
plt.show()
""" Encoding categorical data """
# Encoding of the inputs
# Encoding of lifestyle (column 2) 1--> cozily, 0-->active, 2--> healthy
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
# Encoding of family status (column 4) 0--> married, 1-->single
labelencoder_X_2 = LabelEncoder()
X[:, 3] = labelencoder_X_2.fit_transform(X[:, 3])
# Encoding of car (column 5) 1-->practical, 0-->expensive
labelencoder_X_3 = LabelEncoder()
X[:, 4] = labelencoder_X_3.fit_transform(X[:, 4])
## Taking care of missing data --> adding to new category
X[pd.isnull(X[:, 5]), 5] = 'none'
# Encoding of sports (column 6) 0-->athletics, 3-->soccer, 1--> badminton, 2-->none
labelencoder_X_4 = LabelEncoder()
X[:, 5] = labelencoder_X_4.fit_transform(X[:, 5])
# Encoding of living area (column 8) 1-->urban, 0-->rural
labelencoder_X_5 = LabelEncoder()
X[:, 7] = labelencoder_X_5.fit_transform(X[:, 7])
# Encode categorical data with more than 2 cases
# 1 0--> cozily, 0 0-->active, 0 1--> healthy
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# 1 1-->athletics, 0 1-->soccer, 0 0--> badminton, 1 0-->none (One more column was added previously!)
onehotencoder = OneHotEncoder(categorical_features = [6])
X = onehotencoder.fit_transform(X).toarray()
# eliminate unnecessary columns
X[X[:,0]==1,2] = 1
X[X[:,0]==1,3] = 1
X = X[:, 2:]
""" New Column order:
sport | sport | lifestyle | lifestyle | age | zip code | family status | car | earnings | living area
"""
# Encoding the output 0 -> no response, 1-> response
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
""" Over-sampling using SMOTE """
# Oversampling of the data due to imbalance in the dependent variable
os = SMOTE(random_state=0)
os_data_X,os_data_y=os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X )
os_data_y= pd.DataFrame(data=os_data_y)
print("length of oversampled data is ",len(os_data_X))
print("Number of response in oversampled data",os_data_y[os_data_y==1].count())
print("Number of no response",os_data_y[os_data_y==0].count())
print("Proportion of response data in oversampled data is ",os_data_y[os_data_y==1].count()/len(os_data_X))
print("Proportion of no response data in oversampled data is ",os_data_y[os_data_y==0].count()/len(os_data_X))
# Check that the data is now balanced
os_data_y.mean()
""" Recursive Feature Elimination """
# Check if all independent variables are related to the dependent variable
logreg = LogisticRegression()
rfe = RFE(logreg, 20)
rfe = rfe.fit(os_data_X, os_data_y.values.ravel())
print(rfe.support_)
print(rfe.ranking_)
# Since all are related (True) no variable will be eliminated
""" Creating the logistic regression model"""
# Statistical model of the logistic regression to provide table with the results
logit_model=sm.Logit(os_data_y,os_data_X)
result=logit_model.fit()
print(result.summary2())
# Logistic regression model
logreg = LogisticRegression()
logreg.fit(os_data_X, os_data_y)
# Prediction with the test data split off previously
predictions = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
# Confusion matrix
cm = confusion_matrix(y_test, predictions)
print(cm)
# Receiver operating characteristic curve (ROC curve)
logit_roc_auc = roc_auc_score(y_test, predictions)
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
plt.figure(figsize=(8, 8))
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic', fontsize=18)
plt.legend(loc="lower right") | 2.84375 | 3 |
devilry/apps/core/migrations/0036_auto_20170523_1748.py | aless80/devilry-django | 29 | 12788510 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-05-23 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0035_auto_20170523_1747'),
]
operations = [
migrations.AlterField(
model_name='periodtag',
name='tag',
field=models.TextField(db_index=True),
),
]
| 1.421875 | 1 |
python/lib/singly_linked_list.py | mmore21/ds_algo | 0 | 12788511 | <reponame>mmore21/ds_algo
"""
Topic: Singly Linked List
Category: Data Structure
Author: <NAME>
"""
class Node:
def __init__(self, val, next=None):
""" Constructor of Node class. """
self.val = val
self.next = next
def __str__(self):
""" Override default string format of node when printed. """
return f"Node object: val={self.val}"
class SinglyLinkedList:
def __init__(self, head=None):
""" Constructor of SinglyLinkedList class. """
self.head = head
def add(self, node):
""" Append a node to the end of the linked list. """
if (self.head == None):
self.head = node
else:
ptr = self.head
while (ptr.next != None):
ptr = ptr.next
ptr.next = node
def delete(self, index):
""" Delete a node from the linked list at a specified index. """
# Raise IndexError if deletion index is out of linked list range.
if (index < 0 or index > self.length() - 1):
raise IndexError("Deletion index outside of linked list boundaries.")
# Initialize variable to be set to value of deleted node
val = None
# Special deletion case if index is 0 (head)
if (index == 0):
val = self.head.val
self.head = self.head.next
else:
pos = 0
ptr = self.head
# Iterate over linked list until at node before deletion index
while (pos != index - 1):
ptr = ptr.next
pos += 1
# Set variable to value of node that will be deleted
val = ptr.next.val
# Change the link of the previous node to the deleted node's next link
ptr.next = ptr.next.next
return val
def find(self, val):
""" Iteratively search for value in the linked list. """
ptr = self.head
while (ptr != None):
if ptr.val == val:
return ptr
ptr = ptr.next
return None
def length(self):
""" Get the length of the linked list. """
n = 0
ptr = self.head
while (ptr != None):
n += 1
ptr = ptr.next
return n
def __str__(self):
""" Override default string format of linked list when printed. """
s = ""
ptr = self.head
while (ptr != None):
s += f"{ptr.val} -> "
ptr = ptr.next
return s
def main():
""" Driver function for an example singly linked list. """
print("Adding:")
list = SinglyLinkedList(Node(4))
print(list)
list.add(Node(5))
print(list)
list.add(Node(6))
print(list)
print("Length:", list.length())
print("\nSearching:")
print(list.find(5))
print("\nDeleting:")
list.delete(1)
print(list)
list.delete(1)
print(list)
list.delete(0)
print(list)
print("Length:", list.length())
print("\nAdding:")
list.add(Node(4))
print(list)
print("Length:", list.length())
if __name__ == "__main__":
main() | 3.703125 | 4 |
flax/Ports.py | drewp/light9 | 2 | 12788512 | # super rough code
class AbstractPort:
def __init__(self):
pass
def put_data(self, value):
pass
def get_data(self):
pass
class Port(AbstractPort):
"Connects from a node to exactly one node."
def __init__(self, value=None):
AbstractPort.__init__(self)
self.value = value
def put_data(self, value):
self.value = value
def get_data(self):
return self.value
class MultiPort(AbstractPort):
"Connects from a node to any number of nodes."
def __init__(self, values=None):
AbstractPort.__init__(self)
self.values = values
def put_data(self, values):
self.values = values
def get_data(self):
return self.values
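# Quick sketch of the intended use (values are illustrative):
#
#     p = Port()
#     p.put_data(42)
#     p.get_data()        # -> 42
#
#     mp = MultiPort()
#     mp.put_data([1, 2, 3])
#     mp.get_data()       # -> [1, 2, 3]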
| 3.546875 | 4 |
setup.py | phuongnh3012/django-scraper | 16 | 12788513 | import scraper
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = scraper.__version__
setup(
name='django-scraper',
version=version,
description='Django application for collecting online content following '
'user-defined instructions',
long_description=open('README.rst').read(),
license='The MIT License (MIT)',
url='https://github.com/zniper/django-scraper',
author='<NAME>',
author_email='<EMAIL>',
packages=['scraper', 'scraper.management', 'scraper.management.commands',
'scraper.migrations'],
keywords='crawl scraper spider web pages data extract collect',
install_requires=[
'requests',
'lxml',
'simplejson==3.6.5',
'django-jsonfield==0.9.13',
'readability-lxml==0.5.1',
],
)
| 1.414063 | 1 |
vos/utils.py | rnikutta/datalab | 13 | 12788514 | <gh_stars>10-100
__author__ = 'jjk'
import errno
import os
def mkdir_p(path, mode):
try:
os.makedirs(path, mode)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
os.chmod(path, mode)
else:
raise OSError(errno.ENOTDIR, "{0} exists and is not a directory".format(path)) | 2.8125 | 3 |
scripts/analysis/scheduling_duration_cdf.py | Container-Projects/firmament | 287 | 12788515 | #!/usr/bin/python
import sys, re
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print "usage: scheduling_duration_cdf.py <log file 0> <label 0> " \
"<log file 1> <label 1> ..."
sys.exit(1)
durations = {}
for i in range(1, len(sys.argv), 2):
inputfile = sys.argv[i]
label = sys.argv[i+1]
att_start = None
# read and process log file
for line in open(inputfile).readlines():
rec = re.match("[A-Z][0-9]+ ([0-9:\.]+)\s+[0-9]+ .+\] (.+)", line)
if not rec:
#print "ERROR: failed to match line %s" % (line)
pass
else:
timestamp_str = rec.group(1)
message_str = rec.group(2)
timestamp = datetime.strptime(timestamp_str, "%H:%M:%S.%f")
m = re.match("START SCHEDULING (.+)",
message_str)
if m:
if att_start == None:
job_id = m.group(1)
att_start = timestamp
else:
print "ERROR: overlapping scheduling events?"
m = re.match("STOP SCHEDULING (.+).", message_str)
if m:
if att_start != None:
job_id = m.group(1)
duration = timestamp - att_start
if not label in durations:
durations[label] = []
durations[label].append(duration.total_seconds())
att_start = None
else:
print "ERROR: overlapping scheduling events?"
plt.figure()
for l, d in durations.items():
plt.hist(d, bins=200, label=l)
plt.legend(loc=4)
plt.ylabel("Count")
plt.xlabel("Scheduler runtime [sec]")
plt.savefig("scheduling_duration_hist.pdf", format="pdf", bbox_inches='tight')
plt.clf()
for l, d in durations.items():
plt.hist(d, bins=200, histtype='step', cumulative=True, normed=True, label=l,
lw=2.0)
plt.legend(loc=4)
plt.ylim(0, 1)
plt.xlabel("Scheduler runtime [sec]")
plt.savefig("scheduling_duration_cdf.pdf", format="pdf", bbox_inches='tight')
| 2.84375 | 3 |
ml_preprocess_tools/nlp/tokenizer.py | altescy/ml-preprocess-tools | 0 | 12788516 | <reponame>altescy/ml-preprocess-tools
from __future__ import annotations
import typing as tp
from collections import namedtuple
from itertools import chain
import joblib
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import gen_even_slices
from .token import Token, TokenType
Text = tp.List[Token]
Data = tp.List[Text]
SpacyToken = namedtuple(
'SpacyToken',
('text', 'lemma_', 'pos_', 'tag_')
)
class BaseTokenizer(BaseEstimator, TransformerMixin):
def __init__(self, n_jobs=-1):
self.n_jobs = n_jobs
def tokenize(self, x: str) -> Text:
raise NotImplementedError
def fit(self, X, y=None): # pylint: disable=unused-argument
return self
def transform(self, X: tp.List[str]) -> Data:
n_jobs = self.n_jobs if self.n_jobs > 0 else joblib.cpu_count() + 1 + self.n_jobs
assert n_jobs > 0
@joblib.delayed
@joblib.wrap_non_picklable_objects
def task(X):
return [self.tokenize(x) for x in list(X)]
return list(chain.from_iterable(
joblib.Parallel(n_jobs=n_jobs)(
task(X[s.start:s.stop])
for s in gen_even_slices(len(X), n_jobs)
)
))
class SplitTokenizer(BaseTokenizer):
def __init__(self, n_jobs: int = -1) -> None:
super().__init__(n_jobs=n_jobs)
def tokenize(self, x: str) -> tp.List[Token]:
return [Token(w.strip(), token_type=TokenType.PLAIN) for w in x.split()]
class SpacyTokenizer(BaseTokenizer):
def __init__(self, lang, n_jobs=-1):
super().__init__(n_jobs)
disables = ["textcat", "ner", "parser"]
import spacy
if lang in ["en", "de", "es", "pt", "fr", "it", "nl"]:
nlp = spacy.load(lang, disable=disables)
else:
nlp = spacy.load("xx", disable=disables)
self._nlp = nlp
def tokenize(self, x: str) -> tp.List[Token]:
tokens = self._nlp(x)
return [
Token(SpacyToken(t.text, t.lemma_, t.pos_, t.tag_))
for t in tokens
]
class MeCabTokenizer(BaseTokenizer):
JanomeToken = namedtuple(
"JanomeToken",
("surface", "part_of_speech", "infl_type", "infl_form", "base_form", "reading", "phonetic")
)
def __init__(self, n_jobs=-1) -> None:
super().__init__(n_jobs=n_jobs)
import MeCab
self.tagger = MeCab.Tagger("-Ochasen")
def tokenize(self, x: str) -> Text:
self.tagger.parse('')
node = self.tagger.parseToNode(x)
tokens = []
while node:
if node.surface:
surface = node.surface
features = node.feature.split(',')
if len(features) < 9:
pad_size = 9 - len(features)
features += ["*"] * pad_size
_token = MeCabTokenizer.JanomeToken(
surface, ",".join(features[:4]),
features[4], features[5],
features[6], features[7],
features[8]
)
token = Token(_token, token_type=TokenType.JA)
tokens.append(token)
node = node.next
return tokens
class JanomeTokenizer(BaseTokenizer):
def __init__(self, n_jobs=-1) -> None:
super().__init__(n_jobs=n_jobs)
from janome.tokenizer import Tokenizer
self.tokenizer = Tokenizer()
def tokenize(self, x: str) -> Text:
tokens = self.tokenizer.tokenize(x)
tokens = [Token(t, token_type=TokenType.JA) for t in tokens]
return tokens
| 2.234375 | 2 |
pommermanLearn/util/rewards.py | kungskode/playground | 0 | 12788517 | import numpy as np
from pommerman import constants
from pommerman.constants import Item
from util.data import calc_dist
def staying_alive_reward(nobs, agent_id):
"""
Return a reward if the agent with the given id is alive.
:param nobs: The game state
:param agent_id: The agent to check
:return: The reward for staying alive
"""
#print(nobs[0]['position'][0])
if agent_id in nobs[0]['alive']:
return 1.0
else:
return 0.0
def go_down_right_reward(nobs, high_pos, agent_num, act):
"""
Return a reward for going to the low or right side of the board
:param nobs: The current observation
:param high_pos: Tuple of lowest and most-right position
:param agent_num: The id of the agent to check (0-3)
:return: The reward for going down or right
"""
# only give rewards if a new highest point is reached
bomb_bonus = 0
if act[agent_num] == 5:
bomb_bonus = 0.00
if nobs[agent_num]['position'][0] > high_pos[0]:
return 1 + bomb_bonus, (nobs[agent_num]['position'][0], high_pos[1])
elif nobs[agent_num]['position'][1] > high_pos[1]:
return 1 + bomb_bonus, (high_pos[0], nobs[agent_num]['position'][1])
else:
return 0 + bomb_bonus, high_pos
def bomb_reward(nobs, act, agent_ind):
dist = calc_dist(agent_ind, nobs)
rwd = 0.0
if act[agent_ind] == 5:
rwd = 5.0/dist
elif act[agent_ind] == 0:
rwd = 0.0
else:
rwd = 1.0/dist
return rwd
def skynet_reward(obs, act, nobs, fifo, agent_inds, log):
"""
Skynet reward function rewarding enemy deaths, powerup pickups and stepping on blocks not in FIFO
:param obs: previous observation
:param nobs: new observation
:param fifo: 121 (11x11) cell queue
:return:
"""
# calculate rewards for player agents, rest are zero
r = [0.0] * len(obs)
for i in range(len(obs)):
if i not in agent_inds:
continue
log_ind = 0 if i <= 1 else 1
teammate_ind = i + 2 if log_ind == 0 else i - 2
n_enemies_prev = 0
alive_prev = obs[i]['alive']
for e in obs[i]['enemies']:
if e.value in alive_prev:
n_enemies_prev += 1
prev_n_teammate = 1 if obs[i]['teammate'].value in alive_prev else 0
prev_can_kick = obs[i]['can_kick']
prev_n_ammo = obs[i]['ammo']
prev_n_blast = obs[i]['blast_strength']
cur_alive = nobs[i]['alive']
n_enemy_cur = 0
for e in nobs[i]['enemies']:
if e.value in cur_alive:
n_enemy_cur += 1
cur_n_teammate = 1 if nobs[i]['teammate'].value in cur_alive else 0
cur_can_kick = nobs[i]['can_kick']
cur_n_ammo = nobs[i]['ammo']
cur_n_blast = nobs[i]['blast_strength']
cur_position = nobs[i]['position']
if n_enemies_prev - n_enemy_cur > 0:
r[i] += (n_enemies_prev - n_enemy_cur) * 0.5
log[log_ind][0] += (n_enemies_prev - n_enemy_cur) * 0.5
# if prev_n_teammate - cur_n_teammate > 0:
# r[i] -= (prev_n_teammate-cur_n_teammate)*0.5
# log[log_ind][4] -= (prev_n_teammate-cur_n_teammate)*0.5
if not prev_can_kick and cur_can_kick:
r[i] += 0.02
log[log_ind][1] += 0.02
if cur_n_ammo - prev_n_ammo > 0 and obs[i]['board'][cur_position[0]][cur_position[1]] == Item.ExtraBomb.value:
r[i] += 0.01
log[log_ind][1] += 0.01
if cur_n_blast - prev_n_blast > 0:
r[i] += 0.01
log[log_ind][1] += 0.01
if cur_position not in fifo[i]:
r[i] += 0.001
log[log_ind][2] += 0.001
if len(fifo[i]) == 121:
fifo[i].pop()
fifo[i].append(cur_position)
return r
def _get_positions(board, value):
wood_bitmap = np.isin(board, value).astype(np.uint8)
wood_positions = np.where(wood_bitmap == 1)
return list(zip(wood_positions[0], wood_positions[1]))
def woods_close_to_bomb_reward(obs, bomb_pos, blast_strength, agent_ids):
'''
:param obs: observation
    :param bomb_pos: position where the bomb is laid
:param blast_strength: current blast strength of the agent
:param agent_ids: agent ids of teammates
:return: reward for laying bombs near wood and enemies
'''
board = obs['board']
wood_positions = _get_positions(board, constants.Item.Wood.value)
rigid_positions = _get_positions(board, constants.Item.Rigid.value)
enemy_ids = [10,11,12,13]
for id in agent_ids:
enemy_ids.remove(id)
enemy_positions =[]
for e in enemy_ids:
enemy_positions += _get_positions(board, e)
woods_in_range = 0.0
enemies_in_range = 0.0
# for every wooden block check if it would be destroyed
left_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength+1):
if left_pos[0] == 0:
break
left_pos = (bomb_pos[0] - i, bomb_pos[1])
if left_pos in rigid_positions:
break
elif left_pos in enemy_positions:
enemies_in_range +=1
break
elif left_pos in wood_positions:
woods_in_range += 1
break
right_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if right_pos[0] == len(board)-1:
break
right_pos = (bomb_pos[0] + i, bomb_pos[1])
if right_pos in rigid_positions:
break
elif right_pos in enemy_positions:
enemies_in_range += 1
break
elif right_pos in wood_positions:
woods_in_range += 1
break
down_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if down_pos[1] == 0:
break
down_pos = (bomb_pos[0], bomb_pos[1] - i)
if down_pos in rigid_positions:
break
elif down_pos in enemy_positions:
enemies_in_range += 1
break
elif down_pos in wood_positions:
woods_in_range += 1
break
up_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if up_pos[1] == len(board)-1:
break
up_pos = (bomb_pos[0], bomb_pos[1] + i)
if up_pos in rigid_positions:
break
elif up_pos in enemy_positions:
enemies_in_range += 1
break
elif up_pos in wood_positions:
woods_in_range += 1
break
# for each wood close to bomb reward x
reward = (0.01 * woods_in_range) + (0.3 * enemies_in_range)
return reward
| 3.1875 | 3 |
src/attrbench/suite/dashboard/components/pages/detail_page.py | zoeparman/benchmark | 0 | 12788518 | import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from plotly import express as px
from attrbench.suite.dashboard.components.pages import Page
class DetailPage(Page):
def __init__(self, result_obj, app):
super().__init__(result_obj)
self.app = app
self.rendered = {}
# Callback for method selection dropdown
app.callback(Output("plots-div", "children"),
Input("method-dropdown", "value"))(self._update_method)
def _update_method(self, method_name):
if method_name is not None:
if method_name not in self.rendered:
contents = []
for metric_name in self.result_obj.get_metrics():
contents.append(html.H2(metric_name))
metric_data = self.result_obj.data[metric_name][method_name]
metric_shape = self.result_obj.metadata[metric_name]["shape"]
plot = px.line(metric_data.transpose()) if metric_shape[1] > 1 else px.violin(metric_data)
contents.append(dcc.Graph(id=metric_name, figure=plot))
self.rendered[method_name] = contents
return contents
return self.rendered[method_name]
return f"No method selected."
def render(self) -> html.Div:
return html.Div([
dbc.FormGroup([
dcc.Dropdown(
id="method-dropdown",
options=[
{"label": method, "value": method} for method in self.result_obj.get_methods()
],
placeholder="Select method...")
]),
html.Div(id="plots-div")
])
| 2.375 | 2 |
result/models.py | thesaihan/ytu-su-voting | 0 | 12788519 | from django.db import models
# Create your models here.
class Candidate(models.Model):
id = models.IntegerField(primary_key=True)
cand_no = models.IntegerField(blank=True, null=True)
cand_type = models.CharField(max_length=1, blank=True, null=True)
name = models.CharField(max_length=150, blank=True, null=True)
class Meta:
managed = False
db_table = 'candidate'
def __str__(self):
return str(self.cand_no)+self.cand_type+" : "+self.name | 2.40625 | 2 |
micropython/term_server.py | al177/blinkencard | 2 | 12788520 |
from machine import UART
import sys
import select
import socket
import iceboot
import gc
def go():
"""
Stupid telnet to serial terminal for Blinkencard
"""
uart=UART(2, rx=16, tx=17, timeout=1)
uart.init(9600)
s = socket.socket()
s.bind(socket.getaddrinfo('0.0.0.0', 5000)[0][-1])
s.listen(1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
running = True
while running:
        print('Waiting for connection')
cl, addr = s.accept()
print('Connected to ' + str(addr))
connected = True
cmd_mode = False
cl.setblocking(True)
p = select.poll()
p.register(cl, select.POLLIN)
# Tell telnet client to not echo and not wait for newline to send
# IAC DO LINEMODE
cl.write(b'\xFF\xFD\x22')
# IAC SB LINEMODE MODE 0
cl.write(b'\xFF\xFA\x22\x01\x00')
# IAC SE
cl.write(b'\xFF\xF0')
# IAC WILL ECHO
cl.write(b'\xFF\xFB\x01')
# flush out response for echo
cl.settimeout(100)
cl.recv(100)
while connected:
if uart.any():
recv_char=uart.read(100)
cl.write(recv_char)
events = p.poll(10)
if events:
for event_s, event in events:
if event & select.POLLIN:
# receive and filter
to_send = b''
control_chars = 0
recvd = event_s.recv(100)
# usocket doesn't do POLLERR or POLLHUP, it just returns empty on recv
if not len(recvd):
print('Disconnected')
p.unregister(event_s)
event_s.close()
connected = False
break
for test_char in recvd:
                            if control_chars:
                                # still consuming a telnet IAC sequence: skip this byte
                                control_chars -= 1
                                continue
if test_char == 0xFF:
control_chars = 2
continue
if test_char == 0x00:
continue
if not cmd_mode and test_char == 0x01: # ctrl-a
cmd_mode = True
continue
if cmd_mode:
cmd_mode = False
# Quit on 'k'
if (test_char == 0x6B or test_char == 0x4B):
p.unregister(event_s)
event_s.close()
connected = False
running = False
break
if (test_char == 0x72 or test_char == 0x52):
cl.write(b'Reloading config')
iceboot.boot('altair.bin')
continue
to_send += bytes([test_char])
uart.write(to_send)
s.close()
gc.collect()
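# Usage sketch (network details are illustrative): once the board is on Wi-Fi,
# run term_server.go() and connect with `telnet <board-ip> 5000`.
# In the session, Ctrl-A then 'k' shuts the server down, and Ctrl-A then 'r'
# reloads 'altair.bin' via iceboot.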
| 2.4375 | 2 |
E#09/temp.py | vads5/-Python-Prog | 2 | 12788521 | '''
name: E#09
author: <NAME>
email: <EMAIL>
link: https://www.youtube.com/channel/UCNN3bpPlWWUkUMB7gjcUFlw
MIT License https://github.com/repen/E-parsers/blob/master/License
'''
import requests
from bs4 import BeautifulSoup
base_url = "https://wallpapershome.com"
space = "/space?page=4"
response = requests.get(base_url + space)
html = response.text
# print(html)
soup = BeautifulSoup(html, "html.parser")
conteiner = soup.find("div", {"class":"pics"})
images = conteiner.find_all("p")
urls_images = []
for image in images:
id_img = image.a["href"].split("-")[-1].replace(".html","")
urls_images.append("https://wallpapershome.com/images/pages/pic_h/" + id_img + ".jpg")
# break
images_byte = []
for url in urls_images:
images = requests.get(url)
images_byte.append(images.content)
# break
for e, image in enumerate(images_byte):
# print(image)
with open("image{}.jpg".format(e), "wb") as f:
f.write(image) | 2.984375 | 3 |
model/model.py | Team-Glare/calorieApp_server | 0 | 12788522 | <filename>model/model.py
import pandas as pd
import pymongo
import collections
import matplotlib.pyplot as plt
import numpy as np
import string
df = pd.read_csv('C:\\Users\\Shivam\\Desktop\\calorieApp_server\\model\\cleaned_data.csv')
index_list = df.index.tolist()
client = pymongo.MongoClient('mongodb://localhost:27017')
db = client["test"]
p_details = db["profile"] #profile details
records = p_details.find()
list_record = list(records)
df_profile = pd.DataFrame(list_record)
cur_wt_list = df_profile['weight'].tolist()
goal_wt_list = df_profile['target_weight'].tolist()
food = df['Food'].tolist()
calories = df['Calories'].tolist()
def find_subset(weight: list, req_sum: int):
l = len(weight)
# ROWS : array, # COL : range(sum)
row = l
col = req_sum + 1
# 2d array storing Sum
dp_array = [[0] * col for i in range(row)]
for i in range(row):
for j in range(1, col):
# Row 0
if i == 0:
if j >= weight[i]:
dp_array[i][j] = weight[i]
else:
continue
else:
if j - weight[i] >= 0:
dp_array[i][j] = max(dp_array[i - 1][j], (weight[i] + dp_array[i - 1][j - weight[i]]))
elif j >= weight[i]:
# take from row above it
dp_array[i][j] = max(dp_array[i - 1][j], weight[i])
else:
dp_array[i][j] = dp_array[i - 1][j]
# Find out which Numbers should be in the subset
# give from index 0
row -= 1
col -= 1
sum_subset = []
# check if the Subset is possible : if not, return None
if dp_array[row][col] != req_sum:
return None
# get the subset
while col >= 0 and row >= 0 and req_sum > 0:
# First Row
if (row == 0):
sum_subset.append(weight[row])
break
# Bottom-Right most ele
if (dp_array[row][col] != dp_array[row - 1][col]):
# print(req_sum,' : ',dp_array[row][col],dp_array[row-1][col],' : ',weight[row])
sum_subset.append(weight[row])
req_sum -= weight[row]
col -= weight[row]
row -= 1
else:
row -= 1
return sum_subset
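# Usage sketch: find_subset([250, 400, 150, 300], 550) returns one list of
# values from the input summing exactly to 550 (which particular subset comes
# back depends on the DP traceback order), or None when no such subset exists.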
cur_wt_track = []
#cur_wt = int(input('Enter current weight: '))
cur_wt = int(cur_wt_list[0])
goal_wt = int(goal_wt_list[0])
cur_wt_track.append(cur_wt)
#goal_wt = int(input('Enter goal weight: '))
#set_goal = int(input('In how many days? '))
cal_to_burn = (cur_wt-goal_wt)*7700
if goal_wt < cur_wt: #diet
daily_target = int((cur_wt-goal_wt)*7700/30)-int((cur_wt-goal_wt)*7700*0.8/30) #-2000 #1 kg = 7700 cal
else:
daily_target = int((goal_wt-cur_wt)*7700/30)-int((goal_wt-cur_wt)*7700*0.8/30)
#print(daily_target)
r = round((cur_wt-goal_wt)/30,2)
for i in range(30):
cur_wt = round(cur_wt - r,2)
cur_wt_track.append(cur_wt)
#print(cur_wt_track) #shows weight trend if diet is followed for 30 days
#calories.sort(reverse = True)
food_sort = [x for _,x in sorted(zip(calories,food))]
#print(food_sort)
calories.sort()
sum_subset = find_subset(calories, daily_target)
#print(sum_subset)
#if sum_subset is None:
#print("Sum :", daily_target, "is not possible")
#else:
#print("Subset for sum", daily_target, ' :', sum_subset)
occurrences = collections.Counter(sum_subset)
#print(occurrences)
dict_occ = dict(occurrences)
#print(dict_occ)
list_occ =[]
for i in dict_occ:
t = []
t.append(i)
t.append(dict_occ[i])
list_occ.append(t)
#print(list_occ)
u_cal = list(set(sum_subset))
u_cal_food = []
for i in range(len(u_cal)):
t =[]
for j in range(len(food)):
if u_cal[i] == calories[j]:
t.append(food_sort[j])
u_cal_food.append(t)
#print(u_cal_food)
'''
for i in range(len(u_cal)):
print('Consume one of these items', u_cal_food[i],'*',list_occ[i][1], 'times')
'''
diet_report = open('C:\\Users\\Shivam\\Desktop\\calorieApp_server\\model\\diet_guide.txt', "wt") #path
for i in range(len(u_cal)):
fl = 'Consume one of these items', u_cal_food[i],'*',list_occ[i][1], 'times'
fl = list(fl)
string = ' '.join([str(item) for item in fl])
diet_report.writelines(string)
diet_report.writelines('\n')
diet_report.close()
plt.plot(cur_wt_track, marker='o', color='green')
plt.xlabel('Days')
plt.ylabel('Weight')
plt.title('30 day weight gain/loss projections')
plt.show()
| 2.859375 | 3 |
contrib/internal/build-i18n.py | SCB2278252/reviewboard | 1 | 12788523 | #!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import django
from django.core.management import call_command
import reviewboard
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')
if hasattr(django, 'setup'):
# Django >= 1.7
django.setup()
os.chdir(os.path.dirname(reviewboard.__file__))
sys.exit(call_command('compilemessages', interactive=False, verbosity=2))
| 1.65625 | 2 |
PageBotNano-002-Exporting/pagebotnano_002/document.py | juandelperal/PageBotNano | 0 | 12788524 | <filename>PageBotNano-002-Exporting/pagebotnano_002/document.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ <NAME> + <NAME>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# document.py
#
# This source contains the class with knowledge about a generic document.
#
import sys # Import access to some deep Python functions
import os # Import standard Python library to create the _export directory
import drawBot # Import the drawBot functions, embedded in the DrawBot app.
if __name__ == "__main__":
sys.path.insert(0, "..") # So we can import pagebotnano002 without installing.
from pagebotnano_002.constants import A4, EXPORT_DIR
class Document:
# Class names start with a capital. See a class as a factory
# of document objects (name spelled with an initial lower case.)
def __init__(self, w=None, h=None):
"""This is the "constructor" of a Document instance (=object).
        It takes two optional arguments: `w` is the general width of
        pages and `h` is the general height of pages.
        If omitted, a default A4 page size is taken from the constants.py file.
>>> doc = Document()
>>> doc
I am a Document(w=595, h=842)
"""
        if w is None: # If not defined, then take the width of A4
w, _ = A4
if h is None: # If not defined, then take the height of A4
_, h = A4
# Store the values in the document instance.
self.w = w
self.h = h
def __repr__(self):
# This method is called when print(document) is executed.
# It shows the name of the class, which can be different, if the
# object inherits from Document.
return 'I am a %s(w=%d, h=%d)' % (self.__class__.__name__, self.w, self.h)
def export(self, path):
"""Draw a page and export the document into the _export folder.
        Note that in this version, we still generate the document page
        just before it is exported. No Page instances are stored in the
        Document yet.
"""
        # Make sure that the _export folder exists, as it is not part of the
        # standard download from GitHub, nor is it committed to GitHub.
if path.startswith(EXPORT_DIR) and not os.path.exists(EXPORT_DIR):
os.mkdir(EXPORT_DIR)
# Now let DrawBot do its work, creating the page and saving it.
drawBot.newPage(self.w, self.h)
# For now to have something visible, draw a gray rectangle filling the page.
drawBot.fill(0.2) # Set fill color at 20% black.
drawBot.rect(0, 0, self.w, self.h) # Draw the rectangle.
# Create a Formatted String in white with specified font/fontSize.
fs = drawBot.FormattedString('My specimen', font='Georgia', fontSize=80, fill=1)
# Draw the FormattedString on this fixed position.
drawBot.text(fs, (50, self.h-100))
# Save the drawn DrawBot page into the _export folder, using `path` as file name.
drawBot.saveImage(path)
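# Usage sketch (the file name is illustrative): build a document and export a
# single page into the local _export folder, which export() creates on demand.
#
#     doc = Document(w=400, h=600)
#     doc.export('_export/MySpecimen.pdf')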
if __name__ == "__main__":
import doctest
doctest.testmod()[0]
| 2.734375 | 3 |
Sliding-Window/Fruits-Into-Baskets.py | umerkhan95/teaching-python | 0 | 12788525 | def fruits_into_baskets(fruits):
window_start = 0
max_length = 0
fruit_frequency = {}
# In this loop, we extend the range [window_start, window_end]
for window_end in range(len(fruits)):
right_fruit = fruits[window_end]
if right_fruit not in fruit_frequency:
fruit_frequency[right_fruit] = 0
fruit_frequency[right_fruit] += 1
# Shrink the sliding window, until we are left with 2 fruits in the fruit_frequency
while len(fruit_frequency) > 2:
left_fruit = fruits[window_start]
fruit_frequency[left_fruit] -= 1
if fruit_frequency[left_fruit] == 0:
del fruit_frequency[left_fruit]
window_start += 1 # shrink the window
# remember the maximum length so far
max_length = max(max_length, window_end - window_start + 1)
return max_length
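# Quick check of the expected behaviour (inputs are the usual textbook examples):
if __name__ == "__main__":
    print(fruits_into_baskets(['A', 'B', 'C', 'A', 'C']))        # 3 -> ['C', 'A', 'C']
    print(fruits_into_baskets(['A', 'B', 'C', 'B', 'B', 'C']))   # 5 -> ['B', 'C', 'B', 'B', 'C']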
| 3.5 | 4 |
bit_manipulation/0868_binary_gap/0868_binary_gap.py | zdyxry/LeetCode | 6 | 12788526 | <gh_stars>1-10
# -*- coding: utf-8 -*-
class Solution(object):
def binaryGap(self, N):
"""
:type N: int
:rtype: int
"""
pre = dist = 0
for i, c in enumerate(bin(N)[2:]):
if c == "1":
dist = max(dist, i - pre)
pre = i
return dist
print(Solution().binaryGap(5)) | 3.109375 | 3 |
tests/test_fds.py | CyberFlameGO/fds | 322 | 12788527 | <reponame>CyberFlameGO/fds
import unittest
from unittest.mock import patch
import pytest
import re
from fds.version import __version__
from fds.services.fds_service import FdsService
from fds.run import HooksRunner
BOOLS = [True, False]
# NOTE unittest.mock:_Call backport
def patch_unittest_mock_call_cls():
import sys
if sys.version_info.minor >= 8:
return
import unittest.mock
def _get_call_arguments(self):
if len(self) == 2:
args, kwargs = self
else:
name, args, kwargs = self
return args, kwargs
@property
def args(self):
return self._get_call_arguments()[0]
@property
def kwargs(self):
return self._get_call_arguments()[1]
unittest.mock._Call._get_call_arguments = _get_call_arguments
unittest.mock._Call.args = args
unittest.mock._Call.kwargs = kwargs
patch_unittest_mock_call_cls()
class TestFds(unittest.TestCase):
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_init_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.init()
assert mock_git_service.init.called
assert mock_dvc_service.init.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.status()
assert mock_git_service.status.called
assert mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.status.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, mock_git_service.status)
self.assertRaises(Exception, fds_service.status)
assert mock_git_service.status.called
        assert not mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_status_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.status.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, fds_service.status)
self.assertRaises(Exception, mock_dvc_service.status)
assert mock_git_service.status.called
assert mock_dvc_service.status.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.add(".")
assert mock_git_service.add.called
assert mock_dvc_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.add.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
self.assertRaises(Exception, mock_git_service.add)
with self.assertRaises(Exception):
fds_service.add(".")
assert mock_git_service.add.called
assert mock_dvc_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_add_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.add.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.add(".")
self.assertRaises(Exception, mock_dvc_service.add)
assert mock_dvc_service.add.called
        assert not mock_git_service.add.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_success(self, mock_git_service, mock_dvc_service):
fds_service = FdsService(mock_git_service, mock_dvc_service)
fds_service.commit("some commit message", True)
assert mock_git_service.commit.called
assert mock_dvc_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.commit.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.commit("some commit message", True)
self.assertRaises(Exception, mock_git_service.commit)
assert mock_git_service.commit.called
assert mock_dvc_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_commit_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.commit.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.commit("some commit message", False)
self.assertRaises(Exception, mock_dvc_service.commit)
assert mock_dvc_service.commit.called
        assert not mock_git_service.commit.called
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_clone_dvc_failure(self, mock_git_service, mock_dvc_service):
mock_dvc_service.pull.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.clone("https://github.com/dagshub/fds.git", None, None)
self.assertRaises(Exception, mock_dvc_service.pull)
mock_git_service.clone.assert_called_with("https://github.com/dagshub/fds.git", None)
@patch('fds.services.dvc_service.DVCService')
@patch('fds.services.git_service.GitService')
def test_clone_git_failure(self, mock_git_service, mock_dvc_service):
mock_git_service.clone.side_effect = Exception
fds_service = FdsService(mock_git_service, mock_dvc_service)
with self.assertRaises(Exception):
fds_service.clone("https://github.com/dagshub/fds.git", None, None)
self.assertRaises(Exception, mock_git_service.clone)
        assert not mock_dvc_service.pull.called
class TestFdsHooks:
@pytest.mark.parametrize("dvc_preinstalled", BOOLS)
@pytest.mark.parametrize("install_prompt_accept", BOOLS)
@patch('fds.run.execute_command')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.which')
def test_dvc_installed(
self,
mock_which,
mock_fds_service,
mock_prompt,
mock_execute_command,
dvc_preinstalled: bool,
install_prompt_accept: bool
):
mock_which.return_value = dvc_preinstalled or None
mock_prompt.return_value = install_prompt_accept
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_dvc_installed()
mock_which.assert_called_with("dvc")
if dvc_preinstalled:
return
assert mock_prompt.call_count == 1
if not install_prompt_accept:
assert ret != 0
# TODO validate printer containing "install dvc manually"
return
assert ret == 0
assert mock_execute_command.call_count == 1
args = mock_execute_command.call_args_list[0].args[0]
assert re.findall(r"^pip3 install .*'dvc", args[0])
@pytest.mark.parametrize("git_preinstalled", BOOLS)
@patch('fds.run.sys.exit')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.which')
def test_git_installed(
self,
mock_which,
mock_fds_service,
mock_sys_exit,
git_preinstalled: bool,
):
mock_which.return_value = git_preinstalled or None
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_git_installed()
mock_which.assert_called_with("git")
if git_preinstalled:
assert ret == 0
return
assert mock_sys_exit.call_count == 1
        assert 0 not in mock_sys_exit.call_args[0]
@pytest.mark.parametrize("is_latest", BOOLS)
@pytest.mark.parametrize("install_prompt_accept", BOOLS)
@patch('fds.run.rerun_in_new_shell_and_exit')
@patch('fds.run.execute_command')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
@patch('fds.run.requests.get')
def test_fds_update(
self,
mock_requests_get,
mock_fds_service,
mock_prompt,
mock_execute_command,
mock_rerun,
is_latest: bool,
install_prompt_accept: bool
):
mock_requests_get.return_value = type(
"Response",
(),
{
"json": lambda self: {
"info": {
"version": __version__ + ("b3" if not is_latest else "")
}
}
}
)()
mock_prompt.return_value = install_prompt_accept
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
ret = hooks_runner._ensure_fds_updated()
mock_requests_get.assert_called_with("https://pypi.python.org/pypi/fastds/json")
assert ret == 0
if is_latest:
return
assert mock_prompt.call_count == 1
# # TODO validate stdout contains "Should we upgrade..."
if not install_prompt_accept:
return
assert mock_execute_command.call_count == 1
lst = mock_execute_command.call_args_list[0]
assert re.findall(r"^pip3 install .*fastds.*--upgrade", lst.args[0][0])
assert mock_rerun.call_count == 1
mock_rerun.assert_called_with()
@pytest.mark.parametrize("raise_on_reject", BOOLS)
@pytest.mark.parametrize("service_preinitialized", BOOLS)
@pytest.mark.parametrize("initialize_prompt_accept", BOOLS)
@pytest.mark.parametrize("service_name", ["git", "dvc"])
@patch('fds.run.sys.exit')
@patch('fds.run.get_confirm_from_user')
@patch('fds.services.fds_service.FdsService')
def test_service_initialized(
self,
mock_fds_service,
mock_prompt,
mock_sys_exit,
raise_on_reject: bool,
service_preinitialized: bool,
initialize_prompt_accept: bool,
service_name: str,
tmpdir,
):
attr_name = f"{service_name}_service"
svc = getattr(mock_fds_service.service, attr_name)
fut_name = f"_ensure_{service_name}_initialized"
hooks_runner = HooksRunner(
mock_fds_service.service,
mock_fds_service.printer,
mock_fds_service.logger,
)
fut = getattr(hooks_runner, fut_name)
mock_prompt.return_value = initialize_prompt_accept
with patch.object(
svc,
"repo_path",
tmpdir.strpath,
), patch.object(
svc,
"is_initialized",
return_value=service_preinitialized,
), patch.object(
svc,
"init",
):
ret = fut()
assert svc.is_initialized.call_count == 1
if service_preinitialized:
assert ret == 0
return
assert mock_prompt.call_count == 1
if initialize_prompt_accept:
assert svc.init.call_count == 1
assert ret == 0
return
assert re.findall(
r"You can initialize.*{}.*manually by running".format(service_name),
mock_fds_service.printer.warn.call_args_list[0].args[0]
)
if raise_on_reject:
assert mock_sys_exit.call_count == 1
else:
                assert all(0 not in call[0] for call in mock_sys_exit.call_args_list)
| 2.40625 | 2 |
venv/Lib/site-packages/pdfminer/pslexer.py | richung99/digitizePlots | 202 | 12788528 | import re
import ply.lex as lex
states = (
('instring', 'exclusive'),
)
tokens = (
'COMMENT', 'HEXSTRING', 'INT', 'FLOAT', 'LITERAL', 'KEYWORD', 'STRING', 'OPERATOR'
)
delimiter = r'\(\)\<\>\[\]\{\}\/\%\s'
delimiter_end = r'(?=[%s]|$)' % delimiter
def t_COMMENT(t):
# r'^%!.+\n'
r'%.*\n'
pass
RE_SPC = re.compile(r'\s')
RE_HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')
@lex.TOKEN(r'<[0-9A-Fa-f\s]*>')
def t_HEXSTRING(t):
cleaned = RE_SPC.sub('', t.value[1:-1])
pairs = RE_HEX_PAIR.findall(cleaned)
token_bytes = bytes([int(pair, 16) for pair in pairs])
try:
t.value = token_bytes.decode('ascii')
except UnicodeDecodeError:
# should be kept as bytes
t.value = token_bytes
return t
@lex.TOKEN(r'(\-|\+)?[0-9]+' + delimiter_end)
def t_INT(t):
t.value = int(t.value)
return t
@lex.TOKEN(r'(\-|\+)?([0-9]+\.|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' + delimiter_end)
def t_FLOAT(t):
t.value = float(t.value)
return t
RE_LITERAL_HEX = re.compile(r'#[0-9A-Fa-f]{2}')
@lex.TOKEN(r'/.+?' + delimiter_end)
def t_LITERAL(t):
newvalue = t.value[1:]
    # If there are '#' chars in the literal, we must de-hex it
def re_sub(m):
        # convert the hex pair (without the '#') to an int, then convert that to a character
return bytes.fromhex(m.group(0)[1:]).decode('latin-1')
newvalue = RE_LITERAL_HEX.sub(re_sub , newvalue)
# If there's any lone # char left, remove them
newvalue = newvalue.replace('#', '')
t.value = newvalue
return t
def t_OPERATOR(t):
r'{|}|<<|>>|\[|\]'
return t
t_KEYWORD = r'.+?' + delimiter_end
def t_instring(t):
r'\('
t.lexer.value_buffer = []
t.lexer.string_startpos = t.lexpos
t.lexer.level = 1
t.lexer.begin('instring')
# The parens situation: it's complicated. We can have both escaped parens and unescaped parens.
# If they're escaped, there's nothing special, we unescape them and add them to the string. If
# they're not escaped, we have to count how many of them there are, to know when a rparen is the
# end of the string. The regular expression for this is messed up, so what we do is when we hit
# a paren, we check whether the previous buffer ended with a backslash. If it did, we don't do paren
# balancing.
def t_instring_lparen(t):
r'\('
is_escaped = t.lexer.value_buffer and t.lexer.value_buffer[-1].endswith('\\')
if is_escaped:
t.lexer.value_buffer[-1] = t.lexer.value_buffer[-1][:-1]
else:
t.lexer.level +=1
t.lexer.value_buffer.append('(')
def t_instring_rparen(t):
r'\)'
is_escaped = t.lexer.value_buffer and t.lexer.value_buffer[-1].endswith('\\')
if is_escaped:
t.lexer.value_buffer[-1] = t.lexer.value_buffer[-1][:-1]
else:
t.lexer.level -=1
if t.lexer.level == 0:
t.value = ''.join(t.lexer.value_buffer)
if any(ord(c) > 0x7f for c in t.value):
t.value = t.value.encode('latin-1')
t.type = "STRING"
t.lexpos = t.lexer.string_startpos
t.lexer.begin('INITIAL')
return t
else:
t.lexer.value_buffer.append(')')
RE_STRING_ESCAPE = re.compile(r'\\[btnfr\\]')
RE_STRING_OCTAL = re.compile(r'\\[0-7]{1,3}')
RE_STRING_LINE_CONT = re.compile(r'\\\n|\\\r|\\\r\n')
ESC_STRING = { 'b': '\b', 't': '\t', 'n': '\n', 'f': '\f', 'r': '\r', '\\': '\\' }
def repl_string_escape(m):
return ESC_STRING[m.group(0)[1]]
def repl_string_octal(m):
i = int(m.group(0)[1:], 8)
    if i <= 0xff:  # anything above 0xff cannot be encoded as a single latin-1 character
return chr(i)
else:
return m.group(0)
def t_instring_contents(t):
r'[^()]+'
s = t.value
s = RE_STRING_ESCAPE.sub(repl_string_escape, s)
s = RE_STRING_OCTAL.sub(repl_string_octal, s)
s = RE_STRING_LINE_CONT.sub('', s)
t.lexer.value_buffer.append(s)
t_instring_ignore = ''
t_ignore = ' \t\r\n'
# Error handling rule
def t_error(t):
print("Illegal character '%r'" % t.value[0])
t.lexer.skip(1)
t_instring_error = t_error
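# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Shows how the lexer built below is typically driven; the token types noted in the
# comments follow from the rules above and standard ply behaviour.
def _demo_tokenize(data="/Name#41 123 4.5 (some (nested) text) <48656c6c6f>"):
    lexer.input(data)  # `lexer` is created by lex.lex() below and resolved at call time
    return [(tok.type, tok.value) for tok in lexer]
    # Expected roughly: LITERAL 'NameA', INT 123, FLOAT 4.5,
    # STRING 'some (nested) text', HEXSTRING 'Hello'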
lexer = lex.lex() | 2.4375 | 2 |
testFile1.py | Roxannelevi/Oksana | 0 | 12788529 | name = "Sergey"
department = "QA"
year = 2020
month = 10
print(name, department, year, month)
print(name + department)
if name == "Sergey":
print(name)
else:
print("name is different")
weather = 'rainy'
temperature = 21
print(weather)
print(temperature)
| 4.15625 | 4 |
doc_scanner/math_utils.py | guoli-lyu/document-scanner | 2 | 12788530 | import numpy as np
import pandas as pd
def intersection_cartesian(L1: pd.DataFrame, L2: pd.DataFrame):
"""
Compute cartesian coordinates of intersection points given two list of lines in general form.
General form for a line: Ax+By+C=0
:param L1:
:param L2:
:return:
"""
if not {'A', 'B', 'C'}.issubset(set(L1.columns)) or not {'A', 'B', 'C'}.issubset(set(L2.columns)):
raise ValueError('L1 and L2 should both contains columns A, B and C, which depicts lines in general form')
d = (L1['A'] * L2['B'] - L1['B'] * L2['A'])
dx = L1['B'] * L2['C'] - L1['C'] * L2['B']
dy = L1['C'] * L2['A'] - L1['A'] * L2['C']
x = dx / d
y = dy / d
return list(zip(x.values.tolist(), y.values.tolist()))
def points2line(p1, p2):
"""
Compute Ax+By+C=0 given a list of point [(x1,y1)] and [(x2,y2)].
Single point is also acceptable.
:param p1: point in tuple or array (x1,y1) or a list of points in tuple or array [(x1_1,y1_1),(x1_2,y1_2),...]
:param p2: point in tuple or array (x2,y2) or a list of points in tuple or array [(x2_1,y2_1),(x2_2,y2_2),...]
:return: pd.DataFrame objects of lines in general form(Ax+By+C=0)
"""
p1 = np.array(p1)
p2 = np.array(p2)
if p1.dtype == np.object or p2.dtype == np.object:
raise ValueError("p1 and p2 should matrix alike")
elif len(p1.shape) == 2 and len(p2.shape) == 2:
if p1.shape[1] != 2 or p2.shape[1] != 2:
raise ValueError("p1 and p2 should be matrix with column size of exactly 2")
elif len(p1.shape) == 1 and len(p1) == 2 and len(p1.shape) == 1 and len(p2) == 2:
p1 = p1.reshape(-1, 2)
p2 = p2.reshape(-1, 2)
else:
raise ValueError("Invalid p1 and p2")
a = (p1[:, 1] - p2[:, 1])
b = (p2[:, 0] - p1[:, 0])
c = (p1[:, 0] * p2[:, 1] - p2[:, 0] * p1[:, 1])
return pd.DataFrame([a, b, c], index=['A', 'B', 'C']).T
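# Hedged usage sketch (editor addition): the line through (0, 0)-(1, 1) and the line
# through (0, 1)-(1, 0) intersect at (0.5, 0.5); only the two helpers above are used.
def _demo_intersection():
    l1 = points2line([(0, 0)], [(1, 1)])
    l2 = points2line([(0, 1)], [(1, 0)])
    return intersection_cartesian(l1, l2)  # expected: [(0.5, 0.5)]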
def find_y_on_lines(lines: np.array, x: np.array):
"""
find y of a list of x on a list of lines that in polar form.
:param lines:
:param x:
:return: a list of points, 1th dimension for different x and 2th dimension for different lines
"""
if len(lines) == 0:
return lines
lines = np.array(lines)
if lines.dtype == np.object:
raise ValueError("lines should be matrix alike")
elif len(lines.shape) == 1:
if len(lines) == 2:
lines = lines.reshape(-1, 2)
else:
raise ValueError("the length of line vector should 2")
elif len(lines.shape) == 2:
if lines.shape[1] != 2:
raise ValueError("lines should be matrix with column size of exactly 2")
else:
raise ValueError("Invalid lines")
x = np.array(x)
if x.dtype == np.object:
raise ValueError("x should be matrix alike")
rho = lines[:, 1].reshape(-1, 1)
phi = lines[:, 0].reshape(-1, 1)
y = (rho - x * np.cos(phi)) / np.sin(phi)
return y
def find_points_on_lines(lines: np.array, x: np.array):
"""
find points of a list of x on a list of lines that in polar form.
:param lines:
:param x:
:return: a list of points, 1th dimension for different x and 2th dimension for different lines
"""
if len(lines) == 0:
return lines
lines = np.array(lines)
if len(lines.shape) == 1:
if len(lines) == 2:
lines = lines.reshape(-1, 2)
x = np.array(x)
y = find_y_on_lines(lines, x)
points = list()
for ix in range(len(x)):
points_on_a_line = np.zeros((len(lines), 2))
points_on_a_line[:, 0] = x[ix]
points_on_a_line[:, 1] = y[:, ix]
points.append(list(map(lambda x: tuple(x), points_on_a_line.tolist())))
return points
def interpolate_pixels_along_line(p1: np.array or tuple, p2: np.array or tuple, width=2):
"""Uses Xiaolin Wu's line algorithm to interpolate all of the pixels along a
straight line, given two points (x0, y0) and (x1, y1)
Wikipedia article containing pseudo code that function was based off of:
http://en.wikipedia.org/wiki/Xiaolin_Wu's_line_algorithm
Given by Rick(https://stackoverflow.com/users/2025958/rick)
on https://stackoverflow.com/questions/24702868/python3-pillow-get-all-pixels-on-a-line.
"""
if type(p1) is np.ndarray and type(p2) is np.ndarray:
(x1, y1) = p1.flatten()
(x2, y2) = p2.flatten()
elif len(p1) == 2 and len(p2) == 2:
(x1, y1) = p1
(x2, y2) = p2
else:
raise TypeError("p1 and p2 must be tuple or ndarray depicting points")
pixels = []
steep = np.abs(y2 - y1) > np.abs(x2 - x1)
# Ensure that the path to be interpolated is shallow and from left to right
if steep:
t = x1
x1 = y1
y1 = t
t = x2
x2 = y2
y2 = t
if x1 > x2:
t = x1
x1 = x2
x2 = t
t = y1
y1 = y2
y2 = t
dx = x2 - x1
dy = y2 - y1
gradient = dy / dx # slope
# Get the first given coordinate and add it to the return list
x_end = np.round(x1)
y_end = y1 + (gradient * (x_end - x1))
xpxl0 = x_end
ypxl0 = np.round(y_end)
if steep:
pixels.extend([(ypxl0, xpxl0), (ypxl0 + 1, xpxl0)])
else:
pixels.extend([(xpxl0, ypxl0), (xpxl0, ypxl0 + 1)])
interpolated_y = y_end + gradient
# Get the second given coordinate to give the main loop a range
x_end = np.round(x2)
y_end = y2 + (gradient * (x_end - x2))
xpxl1 = x_end
ypxl1 = np.round(y_end)
# Loop between the first x coordinate and the second x coordinate, interpolating the y coordinates
for x in np.arange(xpxl0 + 1, xpxl1):
if steep:
pixels.extend([(np.floor(interpolated_y) + i, x) for i in range(1 - width, width + 1)])
else:
pixels.extend([(x, np.floor(interpolated_y) + i) for i in range(1 - width, width + 1)])
interpolated_y += gradient
# Add the second given coordinate to the given list
if steep:
pixels.extend([(ypxl1, xpxl1), (ypxl1 + 1, xpxl1)])
else:
pixels.extend([(xpxl1, ypxl1), (xpxl1, ypxl1 + 1)])
# convert to int
return list(map(lambda x: tuple(x), np.array(pixels, dtype=np.int)))
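# Hedged usage sketch (editor addition): returns the integer pixel coordinates covering
# the segment from (0, 0) to (5, 3); `width` widens the band of pixels around the line.
def _demo_interpolate():
    return interpolate_pixels_along_line((0, 0), (5, 3), width=1)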
| 3.65625 | 4 |
asn1PERser/test/per/encoder/test_per_encode_enumerated.py | erupikus/asn1PERser | 3 | 12788531 | import pytest
from pyasn1.type.namedval import NamedValues
from asn1PERser.codec.per.encoder import encode as per_encoder
from asn1PERser.classes.data.builtin.EnumeratedType import EnumeratedType
from asn1PERser.classes.types.constraint import ExtensionMarker
def SCHEMA_my_enum(enumerationRoot_list, extensionMarker_value=False):
class MyEnum(EnumeratedType):
'''
MyEnum ::= ENUMERATED {
e0,
e1,
.
.
.
eN-1
eN
}
'''
subtypeSpec = ExtensionMarker(extensionMarker_value)
enumerationRoot = NamedValues(
*[(item, index) for index, item in enumerate(enumerationRoot_list)]
)
extensionAddition = NamedValues(
)
namedValues = enumerationRoot + extensionAddition
return MyEnum
def SCHEMA_my_ext_enum(enumerationRoot_list, extensionAddition_list, extensionMarker_value=False):
class MyEnum(EnumeratedType):
'''
MyEnum::= ENUMERATED
{
e0,
e1,
.
.
.
eN - 1
eN,
...,
eN+1
.
.
.
eM-1,
eM
}
'''
subtypeSpec = ExtensionMarker(extensionMarker_value)
enumerationRoot = NamedValues(
*[(item, index) for index, item in enumerate(enumerationRoot_list)]
)
extensionAddition = NamedValues(
*[(item, index) for index, item in enumerate(extensionAddition_list, start=len(enumerationRoot_list))]
)
namedValues = enumerationRoot + extensionAddition
return MyEnum
def DATA_my_enum(enum, value):
return enum(value)
short_enum = ['a0', 'a1']
enumeration_list = ['e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9',
'e10', 'e11', 'e12', 'e13', 'e14', 'e15', 'e16', 'e17', 'e18', 'e19',
'e20', 'e21', 'e22', 'e23', 'e24', 'e25', 'e26', 'e27', 'e28', 'e29',
'e30', 'e31', 'e32', 'e33', 'e34', 'e35', 'e36', 'e37', 'e38', 'e39',
'e40', 'e41', 'e42', 'e43', 'e44', 'e45', 'e46', 'e47', 'e48', 'e49',
'e50', 'e51', 'e52', 'e53', 'e54', 'e55', 'e56', 'e57', 'e58', 'e59',
'e60', 'e61', 'e62', 'e63', 'e64', 'e65', 'e66', 'e67', 'e68', 'e69',
'e70', 'e71', 'e72', 'e73', 'e74', 'e75', 'e76', 'e77', 'e78', 'e79',
'e80', 'e81', 'e82', 'e83', 'e84', 'e85', 'e86', 'e87', 'e88', 'e89',
'e90', 'e91', 'e92', 'e93', 'e94', 'e95', 'e96', 'e97', 'e98', 'e99',
'e100', 'e101', 'e102', 'e103', 'e104', 'e105', 'e106', 'e107', 'e108', 'e109',
'e110', 'e111', 'e112', 'e113', 'e114', 'e115', 'e116', 'e117', 'e118', 'e119',
'e120', 'e121', 'e122', 'e123', 'e124', 'e125', 'e126', 'e127', 'e128', 'e129',
'e130', 'e131', 'e132', 'e133', 'e134', 'e135', 'e136', 'e137', 'e138', 'e139',
'e140', 'e141', 'e142', 'e143', 'e144', 'e145', 'e146', 'e147', 'e148', 'e149',
'e150', 'e151', 'e152', 'e153', 'e154', 'e155', 'e156', 'e157', 'e158', 'e159',
'e160', 'e161', 'e162', 'e163', 'e164', 'e165', 'e166', 'e167', 'e168', 'e169',
'e170', 'e171', 'e172', 'e173', 'e174', 'e175', 'e176', 'e177', 'e178', 'e179',
'e180', 'e181', 'e182', 'e183', 'e184', 'e185', 'e186', 'e187', 'e188', 'e189',
'e190', 'e191', 'e192', 'e193', 'e194', 'e195', 'e196', 'e197', 'e198', 'e199',
'e200', 'e201', 'e202', 'e203', 'e204', 'e205', 'e206', 'e207', 'e208', 'e209',
'e210', 'e211', 'e212', 'e213', 'e214', 'e215', 'e216', 'e217', 'e218', 'e219',
'e220', 'e221', 'e222', 'e223', 'e224', 'e225', 'e226', 'e227', 'e228', 'e229',
'e230', 'e231', 'e232', 'e233', 'e234', 'e235', 'e236', 'e237', 'e238', 'e239',
'e240', 'e241', 'e242', 'e243', 'e244', 'e245', 'e246', 'e247', 'e248', 'e249',
'e250', 'e251', 'e252', 'e253', 'e254', 'e255', 'e256', 'e257', 'e258', 'e259']
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2]), 'e1'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10]), 'e9'), '90'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17]), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33]), 'e32'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100]), 'e98'), 'C4'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e126'), '7E'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e127'), '7F'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130]), 'e128'), '80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e128'), '0080'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e254'), '00FE'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e255'), '00FF'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260]), 'e256'), '0100'),
])
def test_no_extension_marker_enumerated_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '40'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '12'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), '40'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), '62'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), '3F00'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), '3F80'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), '4000'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), '000080'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), '0000FE'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), '0000FF'),
(DATA_my_enum(SCHEMA_my_enum(enumerationRoot_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e0'), '00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:2], extensionAddition_list=short_enum, extensionMarker_value=True), 'e1'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:10], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '48'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:17], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '24'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e9'), '12'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:33], extensionAddition_list=short_enum, extensionMarker_value=True), 'e32'), '40'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:100], extensionAddition_list=short_enum, extensionMarker_value=True), 'e98'), '62'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e126'), '3F00'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e127'), '3F80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:130], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '4000'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e128'), '000080'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e254'), '0000FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e255'), '0000FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=enumeration_list[0:260], extensionAddition_list=short_enum, extensionMarker_value=True), 'e256'), '000100'),
])
def test_extension_marker_is_present_and_extension_addition_is_not_empty_but_value_is_from_root_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
@pytest.mark.parametrize("enumerated, encoded", [
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e0'), '80'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:2], extensionMarker_value=True), 'e1'), '81'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:10], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:17], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e9'), '89'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:33], extensionMarker_value=True), 'e32'), 'A0'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:100], extensionMarker_value=True), 'e98'), 'C00162'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e126'), 'C0017E'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e127'), 'C0017F'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:130], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e128'), 'C00180'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e254'), 'C001FE'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e255'), 'C001FF'),
(DATA_my_enum(SCHEMA_my_ext_enum(enumerationRoot_list=short_enum, extensionAddition_list=enumeration_list[0:260], extensionMarker_value=True), 'e256'), 'C0020100'),
])
def test_extension_marker_is_present_and_value_is_from_extension_can_be_encoded(enumerated, encoded):
assert per_encoder(enumerated) == bytearray.fromhex(encoded)
| 2.171875 | 2 |
test/test_four_bit_mac.py | Atman-Kar/reram-architectures | 2 | 12788532 | from error.error_crossbar import *
from basic_blocks.mvm_four_bit import mvm_four_by_four
import unittest
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
class TestFourBitMAC(unittest.TestCase):
'''
Test some multiplier structures
'''
def test_multiply_and_accumulate_operation_four_by_four(self):
'''
Test the multiplication:
| 11 13| | 3 | = | 150 |
| 2 6| | 9 | = | 60 |
Remember, the matrix above is stored as its transpose in the ReRAM crossbar
'''
mat = mvm_four_by_four()
mat.set_conductance_matrix([[11, 2],
[13, 6]])
input_v = [3, 9]
mat.crossbar_multiply(input_v=input_v)
expected_matrix = [150, 60]
self.assertEqual(mat.get_shift_reg_values(), expected_matrix)
if __name__ == "__main__":
unittest.main()
| 2.640625 | 3 |
split_image.py | eguzman3/BMI-CAAE | 1 | 12788533 | <filename>split_image.py
from PIL import Image
import glob
import sys
import os
# USAGE:
# python <split_image.py> <input_dir> <output_dir>
# where input_dir conatains PNG files (outputs from the model)
# and output_dir will contain 10 PNG files labeled by bucket number
if len(sys.argv) != 3:
print("WRong args")
exit(1)
output_dir = sys.argv[2]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
file_names = glob.glob(sys.argv[1] + "/*.png")
for name in file_names:
basename = os.path.basename(name)
im = Image.open(name)
print("Opened image: {}".format(name))
for i in range(10):
x = 148 + (140*i)
y = 8
crop_rectangle = (x, y, x+128, y+128)
cropped_im = im.crop(crop_rectangle)
cropped_im.save(output_dir + "/Bucket_" + str(i) + "_" + basename)
| 3.140625 | 3 |
problems/484.Find_Permutation/stefan_1line_sort.py | subramp-prep/leetcode | 0 | 12788534 | def findPermutation(self, s):
return sorted(range(1, len(s) + 2), cmp=lambda i, j: -('I' not in s[j - 1:i - 1]))
# My 1-liner tells sorted that the (larger) number i comes before
# the (smaller) number j iff they're both under the same D-streak, i.e.,
# iff there's no I between them. (I'm not totally sure that i will always
# be the larger number, but it appears to be the case).
# https://discuss.leetcode.com/topic/
| 3.53125 | 4 |
activitysimulations/watchingsimulation.py | TheImaginaryOne/movies-py | 0 | 12788535 | <reponame>TheImaginaryOne/movies-py
from domainmodel.movie import Movie
from domainmodel.user import User
from domainmodel.review import Review
class MovieWatchingSimulation:
pass | 1.171875 | 1 |
algs4/merge.py | dumpmemory/algs4-py | 230 | 12788536 | """
Sorts a sequence of strings from standard input using merge sort.
% more tiny.txt
S O R T E X A M P L E
% python merge.py < tiny.txt
A E E L M O P R S T X [ one string per line ]
% more words3.txt
bed bug dad yes zoo ... all bad yet
% python merge.py < words3.txt
all bad bed bug dad ... yes yet zoo [ one string per line ]
"""
class Merge:
@classmethod
def merge(cls, arr, lo, mid, hi):
aux = list(arr) # copy to aux
i = lo
j = mid + 1
k = lo
while k <= hi:
if i > mid:
arr[k] = aux[j]
j += 1
elif j > hi:
arr[k] = aux[i]
i += 1
elif aux[i] < aux[j]:
arr[k] = aux[i]
i += 1
else:
arr[k] = aux[j]
j += 1
k += 1
@classmethod
def mergesort(cls, arr, lo, hi):
if lo >= hi:
return
mid = (lo + hi) // 2
cls.mergesort(arr, lo, mid)
cls.mergesort(arr, mid + 1, hi)
cls.merge(arr, lo, mid, hi)
return arr
@classmethod
def sort(cls, arr):
return cls.mergesort(arr, 0, len(arr) - 1)
@classmethod
def is_sorted(cls, arr):
for i in range(1, len(arr)):
if arr[i] < arr[i-1]:
return False
return True
if __name__ == '__main__':
import sys
items = []
for line in sys.stdin:
items.extend(line.split())
print(' items: ', items)
print('sort items: ', Merge.sort(items))
assert Merge.is_sorted(items)
| 4.09375 | 4 |
pick_choose.py | iomintz/python-snippets | 2 | 12788537 | <gh_stars>1-10
from math import factorial as fac
P = lambda n, r: fac(n) // fac(n - r)
C = lambda n, r: P(n, r) // fac(r)
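# Hedged check (editor addition): P(5, 2) == 20 ordered picks and C(5, 2) == 10 choices;
# integer division keeps both results exact ints.
assert P(5, 2) == 20 and C(5, 2) == 10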
| 2.125 | 2 |
accounts/views.py | Xavier-Cliquennois/ac-mediator | 9 | 12788538 | <reponame>Xavier-Cliquennois/ac-mediator<filename>accounts/views.py
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from accounts.forms import RegistrationForm, ReactivationForm
from accounts.models import ServiceCredentials, Account
from services.mgmt import get_available_services, get_service_by_id
from services.acservice.constants import ENDUSER_AUTH_METHOD
from ac_mediator.exceptions import *
from utils.encryption import create_hash
def registration(request):
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('home'))
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
account = form.save()
account.is_active = False
account.save()
account.send_activation_email()
return render(request, 'accounts/registration.html', {'form': None})
else:
form = RegistrationForm()
return render(request, 'accounts/registration.html', {'form': form})
def activate_account(request, username, uid_hash):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('home'))
try:
account = Account.objects.get(username__iexact=username)
except Account.DoesNotExist:
return render(request, 'accounts/activate.html', {'user_does_not_exist': True})
new_hash = create_hash(account.id)
if new_hash != uid_hash:
return render(request, 'accounts/activate.html', {'decode_error': True})
account.is_active = True
account.save()
return render(request, 'accounts/activate.html', {'all_ok': True})
def resend_activation_emmil(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('home'))
if request.method == 'POST':
form = ReactivationForm(request.POST)
if form.is_valid():
account = form.cleaned_data['account']
account.send_activation_email()
return render(request, 'accounts/resend_activation.html', {'form': None})
else:
form = ReactivationForm()
return render(request, 'accounts/resend_activation.html', {'form': form})
@login_required
def home(request):
return render(request, 'accounts/home.html')
@login_required
def about(request):
return render(request, 'accounts/about.html')
@login_required
def link_services(request):
linkable_services_info = list()
non_linkable_services_info = list()
for service in get_available_services():
if service.supports_auth(ENDUSER_AUTH_METHOD):
is_linked = False
try:
service.get_enduser_token(request.user)
is_linked = True
except (ACException, ACAPIException):
pass
linkable_services_info.append((
service,
is_linked,
))
else:
non_linkable_services_info.append(service)
tvars = {'linkable_services_info': linkable_services_info,
'non_linkable_services_info': non_linkable_services_info}
return render(request, 'accounts/link_services.html', tvars)
@login_required
def link_service_callback(request, service_id):
try:
service = get_service_by_id(service_id)
except ACServiceDoesNotExist:
service = None
code = request.GET.get('code', None)
if code is None:
print('There were errors in the redirect from service: {0}'.format(request.GET.get('error', 'unknown')))
tvars = {
'errors': service is None or code is None,
'service_id': service.id if service is not None else None,
'code': request.GET.get('code'),
'complete': False,
}
return render(request, 'accounts/link_service_callback.html', tvars)
def store_service_credentials_helper(credentials, account, service_id):
# Store credentials (replace existing ones if needed)
service_credentials, is_new = ServiceCredentials.objects.get_or_create(
account=account, service_id=service_id)
service_credentials.credentials = credentials
service_credentials.save()
@login_required
def link_service_get_token(request, service_id):
service = get_service_by_id(service_id) # No need to check as is called after link_service_callback
# Request credentials
success, credentials = service.request_credentials(request.GET.get('code'))
if success:
store_service_credentials_helper(credentials, request.user, service.id)
else:
# Delete credentials (if existing)
ServiceCredentials.objects.filter(account=request.user, service_id=service_id).delete()
tvars = {
'errors': not success,
'complete': True,
}
return render(request, 'accounts/link_service_callback.html', tvars)
@login_required
def unlink_service(request, service_id):
ServiceCredentials.objects.filter(account=request.user, service_id=service_id).delete()
return HttpResponseRedirect(reverse('link_services'))
| 1.890625 | 2 |
Chinese/HPS3D_SDK_ROS_Demo/devel/lib/python2.7/dist-packages/hps_camera/msg/__init__.py | hypersen/HPS3D_SDK | 13 | 12788539 | <filename>Chinese/HPS3D_SDK_ROS_Demo/devel/lib/python2.7/dist-packages/hps_camera/msg/__init__.py
from ._PointCloudData import *
from ._distance import *
from ._measureData import *
| 1.023438 | 1 |
mir3/modules/unsupervised/detection/threshold/tests.py | pymir3/pymir3 | 12 | 12788540 | import numpy
import mir3.data.linear_decomposition as ld
import mir3.module
class Tests(mir3.module.Module):
def get_help(self):
return """computes the values at which the threshold should be tested"""
def build_arguments(self, parser):
parser.add_argument('-n','--number-values', default=10, type=int,
help="""number of thresholds to consider (default:
%(default)s)""")
parser.add_argument('--use-max', action='store_true', default=False,
help="""use the maximum for the upper threshold
bound, otherwise use another statistic based on
outlier detection""")
parser.add_argument('infile', nargs='+', help="""linear decomposition
files""")
def run(self, args):
decompositions = []
for filename in args.infile:
with open(filename, 'rb') as handler:
decompositions.append(
ld.LinearDecomposition().load(handler))
for level in self.get_levels(decompositions,
args.number_values,
args.use_max):
print level
def get_levels(self, decompositions, number_values, use_max=False):
"""Computes threshold levels that should be tested.
Based on a list of linear decompositions, uses some heuristics to find
good threshold values.
Args:
decompositions: list of decompositions used to compute thresholds.
number_values: number of threshold values.
use_max: flag indicating that the maximum value of the activations
should be the upper bound.
Returns:
Numpy nparray with the thresholds.
"""
# Initialized bounds
minLevel = float('-inf')
maxLevel = float('-inf')
# Evaluates each decomposition
for d in decompositions:
            # Evaluates instruments one at a time
instruments = set([k[0] for k in d.data.right.keys()])
for instrument in instruments:
A_instrument = [] # Activation for the instrument
# Evaluates each note
notes = set([k[1] for k in d.data.right.keys()
if k[0] == instrument])
for note in notes:
# For now, if the activation has more than one line, just
# merges them all
datas = [d.data.right[k].reshape(-1)
for k in d.data.right.keys()
if k[0] == instrument and k[1] == note]
A = numpy.hstack(datas) # Activations for the note
A_instrument.append(A)
A_instrument = numpy.hstack(A_instrument)
# Levels can only increase as we move from one instrument to
# another.
# Chooses method to compute max activation
if use_max:
maxLevel = max(maxLevel, numpy.max(A_instrument))
else:
maxLevel = max(maxLevel, numpy.mean(A_instrument) + \
5*numpy.std(A_instrument))
minLevel = max(minLevel, numpy.mean(A_instrument))
# Gets a range of levels to test
return numpy.linspace(minLevel, maxLevel, number_values)
| 2.765625 | 3 |
course/views.py | ZzzD97/Graduate-master | 0 | 12788541 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from rest_framework.response import Response
from rest_framework.utils import json
from rest_framework.viewsets import ViewSetMixin
from course import models
from rest_framework.views import APIView
from rest_framework import serializers, generics
from course import models
from arrange import models
from rest_framework import exceptions
from django.contrib.auth.views import LoginView, LogoutView
# Create your views here.
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = models.CourseBaseInfo
fields = "__all__"
class ClassroomSerializer(serializers.ModelSerializer):
class Meta:
model = models.ClassroomInfo
fields = "__all__"
class ClassroomDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.ClassroomInfo.objects.all()
serializer_class = ClassroomSerializer
class CourseDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = models.CourseBaseInfo.objects.all()
serializer_class = CourseSerializer
class CourseView(APIView):
def get(self, request, *args, **kwargs):
"""
        Course detail endpoint.
:param request:
:param args:
:param kwargs:
:return:
"""
ret = {'code': 1000, 'data': None}
try:
            pk = kwargs.get('pk')  # course id
            # course detail object
obj = models.CourseBaseInfo.objects.filter(pk=pk).first()
ser = CourseSerializer(instance=obj, many=False)
ret['data'] = ser.data
except Exception as e:
ret['code'] = 1001
            ret['error'] = "Failed to fetch course details"
return Response(ret)
| 2.0625 | 2 |
observations/r/capm.py | hajime9652/observations | 199 | 12788542 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def capm(path):
"""Stock Market Data
monthly observations from 1960–01 to 2002–12
*number of observations* : 516
  A time series containing:
rfood
excess returns food industry
rdur
excess returns durables industry
rcon
excess returns construction industry
rmrf
excess returns market portfolio
rf
riskfree return
most of the above data are from Kenneth French's data library at
http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `capm.csv`.
Returns:
Tuple of np.ndarray `x_train` with 516 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'capm.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Capm.csv'
maybe_download_and_extract(path, url,
save_file_name='capm.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
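if __name__ == '__main__':
  # Hedged usage sketch (editor addition): the path below is an arbitrary example; the
  # CSV is downloaded on the first call and read from disk afterwards.
  x_train, metadata = capm('~/observations_data')
  print(x_train.shape)              # expected: (516, 5)
  print(list(metadata['columns']))  # rfood, rdur, rcon, rmrf, rf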
| 3.203125 | 3 |
src/tests/test_pagure_flask_api_plugins_view.py | yifengyou/learn-pagure | 0 | 12788543 | # -*- coding: utf-8 -*-
"""
(c) 2019 - Copyright Red Hat Inc
Authors:
<NAME> <<EMAIL>>
"""
from __future__ import unicode_literals, absolute_import
import unittest
import sys
import os
import json
from mock import patch
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import tests # noqa: E402
class PagureFlaskApiPluginViewtests(tests.Modeltests):
"""Tests for the flask API of pagure for viewing plugins"""
def test_view_plugin(self):
"""Test viewing every plugin available in pagure."""
output = self.app.get("/api/0/_plugins")
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{
"plugins": [
{"Block Un-Signed commits": []},
{"Block non fast-forward pushes": ["branches"]},
{"Fedmsg": []},
{
"IRC": [
"server",
"port",
"room",
"nick",
"nick_pass",
"join",
"ssl",
]
},
{"Mail": ["mail_to"]},
{"Mirroring": ["target", "public_key", "last_log"]},
{"Pagure": []},
{
"Pagure CI": [
"ci_type",
"ci_url",
"ci_job",
"active_commit",
"active_pr",
]
},
{"Pagure requests": []},
{"Pagure tickets": []},
{"Prevent creating new branches by git push": []},
{"Read the Doc": ["api_url", "api_token", "branches"]},
],
"total_plugins": 12,
},
)
@patch.dict("pagure.config.config", {"DISABLED_PLUGINS": ["IRC"]})
def test_view_plugin_disabled(self):
"""Test viewing every plugin available in pagure with one plugin disabled."""
output = self.app.get("/api/0/_plugins")
self.assertEqual(output.status_code, 200)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
data,
{
"plugins": [
{"Block Un-Signed commits": []},
{"Block non fast-forward pushes": ["branches"]},
{"Fedmsg": []},
{"Mail": ["mail_to"]},
{"Mirroring": ["target", "public_key", "last_log"]},
{"Pagure": []},
{
"Pagure CI": [
"ci_type",
"ci_url",
"ci_job",
"active_commit",
"active_pr",
]
},
{"Pagure requests": []},
{"Pagure tickets": []},
{"Prevent creating new branches by git push": []},
{"Read the Doc": ["api_url", "api_token", "branches"]},
],
"total_plugins": 12,
},
)
if __name__ == "__main__":
unittest.main(verbosity=2)
| 1.882813 | 2 |
simple_history/tests/tests/test_commands.py | mikhsol/django-simple-history | 1 | 12788544 | from contextlib import contextmanager
from datetime import datetime
from six.moves import cStringIO as StringIO
from django.test import TestCase
from django.core import management
from simple_history import models as sh_models
from simple_history.management.commands import populate_history
from .. import models
@contextmanager
def replace_registry(new_value=None):
hidden_registry = sh_models.registered_models
sh_models.registered_models = new_value or {}
try:
yield
except Exception:
raise
finally:
sh_models.registered_models = hidden_registry
class TestPopulateHistory(TestCase):
command_name = 'populate_history'
command_error = (management.CommandError, SystemExit)
def test_no_args(self):
out = StringIO()
management.call_command(self.command_name,
stdout=out, stderr=StringIO())
self.assertIn(populate_history.Command.COMMAND_HINT, out.getvalue())
def test_bad_args(self):
test_data = (
(populate_history.Command.MODEL_NOT_HISTORICAL, ("tests.place",)),
(populate_history.Command.MODEL_NOT_FOUND, ("invalid.model",)),
(populate_history.Command.MODEL_NOT_FOUND, ("bad_key",)),
)
for msg, args in test_data:
out = StringIO()
self.assertRaises(self.command_error, management.call_command,
self.command_name, *args,
stdout=StringIO(), stderr=out)
self.assertIn(msg, out.getvalue())
def test_auto_populate(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
management.call_command(self.command_name, auto=True,
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 1)
def test_populate_with_custom_batch_size(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
management.call_command(self.command_name, auto=True, batchsize=500,
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 1)
def test_specific_populate(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
models.Book.objects.create(isbn="9780007117116")
models.Book.history.all().delete()
management.call_command(self.command_name, "tests.book",
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Book.history.all().count(), 1)
self.assertEqual(models.Poll.history.all().count(), 0)
def test_failing_wont_save(self):
models.Poll.objects.create(question="Will this populate?",
pub_date=datetime.now())
models.Poll.history.all().delete()
self.assertRaises(self.command_error,
management.call_command, self.command_name,
"tests.poll", "tests.invalid_model",
stdout=StringIO(), stderr=StringIO())
self.assertEqual(models.Poll.history.all().count(), 0)
def test_multi_table(self):
data = {'rating': 5, 'name': "Tea '<NAME>"}
models.Restaurant.objects.create(**data)
models.Restaurant.updates.all().delete()
management.call_command(self.command_name, 'tests.restaurant',
stdout=StringIO(), stderr=StringIO())
update_record = models.Restaurant.updates.all()[0]
for attr, value in data.items():
self.assertEqual(getattr(update_record, attr), value)
def test_existing_objects(self):
data = {'rating': 5, 'name': "Tea '<NAME>"}
out = StringIO()
models.Restaurant.objects.create(**data)
pre_call_count = models.Restaurant.updates.count()
management.call_command(self.command_name, 'tests.restaurant',
stdout=StringIO(), stderr=out)
self.assertEqual(models.Restaurant.updates.count(), pre_call_count)
self.assertIn(populate_history.Command.EXISTING_HISTORY_FOUND,
out.getvalue())
def test_no_historical(self):
out = StringIO()
with replace_registry():
management.call_command(self.command_name, auto=True,
stdout=out)
self.assertIn(populate_history.Command.NO_REGISTERED_MODELS,
out.getvalue())
| 2.25 | 2 |
topologic/statistics/degree_centrality.py | microsoft/topologic | 24 | 12788545 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import networkx as nx
import numpy as np
from .defined_histogram import DefinedHistogram
from typing import List, Union
from .make_cuts import MakeCuts, filter_function_for_make_cuts
def histogram_degree_centrality(
graph: nx.Graph,
bin_directive: Union[int, List[Union[float, int]], np.ndarray, str] = 10
) -> DefinedHistogram:
"""
Generates a histogram of the vertex degree centrality of the provided graph.
Histogram function is fundamentally proxied through to numpy's `histogram` function, and bin selection
follows `numpy.histogram` processes.
:param networkx.Graph graph: the graph. No changes will be made to it.
:param bin_directive: Is passed directly through to numpy's
"histogram" (and thus, "histogram_bin_edges") functions.
See: https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.histogram_bin_edges.html#numpy.histogram_bin_edges
In short description: if an int is provided, we use `bin_directive` number of equal range bins.
If a sequence is provided, these bin edges will be used and can be sized to whatever size you prefer
Note that the np.ndarray should be ndim=1 and the values should be float or int.
:type bin_directive: Union[int, List[Union[float, int]], numpy.ndarray, str]
:return: A named tuple that contains the histogram and the bin_edges used in the histogram
:rtype: DefinedHistogram
""" # noqa:501
degree_centrality_dict = nx.degree_centrality(graph)
histogram, bin_edges = np.histogram(
list(degree_centrality_dict.values()),
bin_directive
)
return DefinedHistogram(histogram=histogram, bin_edges=bin_edges)
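# Hedged usage sketch (editor addition): a 4-node star graph has one hub with degree
# centrality 1.0 and three leaves at 1/3, so two equal-width bins split the counts 3/1.
def _demo_histogram_degree_centrality():
    hist = histogram_degree_centrality(nx.star_graph(3), bin_directive=2)
    return hist.histogram, hist.bin_edges  # expected histogram: array([3, 1])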
def cut_vertices_by_degree_centrality(
graph: nx.Graph,
cut_threshold: Union[int, float],
cut_process: MakeCuts
) -> nx.Graph:
"""
Given a graph and a cut_threshold and a cut_process, return a copy of the graph with the vertices outside of the
cut_threshold.
:param networkx.Graph graph: The graph that will be copied and pruned.
:param cut_threshold: The threshold for making cuts based on degree centrality.
:type cut_threshold: Union[int, float]
:param MakeCuts cut_process: Describes how we should make the cut; cut all edges larger or smaller than the
cut_threshold, and whether exclusive or inclusive.
:return: Pruned copy of the graph
:rtype: networkx.Graph
"""
graph_copy = graph.copy()
degree_centrality_dict = nx.degree_centrality(graph_copy)
filter_by = filter_function_for_make_cuts(cut_threshold, cut_process)
vertices_to_cut = list(filter(filter_by, degree_centrality_dict.items()))
for vertex, degree_centrality in vertices_to_cut:
graph_copy.remove_node(vertex)
return graph_copy
| 3.5625 | 4 |
dexp/processing/utils/nan_to_zero.py | haesleinhuepf/dexp | 16 | 12788546 | from dexp.utils import xpArray
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
def nan_to_zero(array: xpArray, copy: bool = True) -> xpArray:
"""
    Replaces every NaN in an array with zero. It may or may not operate in-place,
    so to be safe, always use the returned array.
Parameters
----------
array : array to replace NaNs with zeros.
copy : True/False to suggest whether copy or in-place behaviour should occur.
Returns
-------
Array for which NaNs have been replace by zero.
"""
# TODO: should we remove this function?
backend = Backend.current()
if type(backend) is NumpyBackend:
xp = backend.get_xp_module()
return xp.nan_to_num(array, copy=copy)
elif type(backend) is CupyBackend:
import cupy
return cupy.nan_to_num(array)
| 3.09375 | 3 |
Utils.py | Vrroom/IRL | 2 | 12788547 | <reponame>Vrroom/IRL<gh_stars>1-10
""" Utility functions """
import torch
import numpy as np
def computeReturns(R, gamma, normalize=False) :
""" Compute discounted returns """
g = 0
G = []
for r in R[::-1] :
g = g * gamma + r
G.insert(0, g)
G = np.array(G)
if normalize :
G = (G - G.mean()) / (G.std() + 1e-7)
return G
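# Hedged worked example (editor addition): with R = [1, 1, 1] and gamma = 0.5,
# G_t = r_t + gamma * G_{t+1} gives [1.75, 1.5, 1.0].
assert np.allclose(computeReturns([1, 1, 1], 0.5), [1.75, 1.5, 1.0])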
def inRange(a, interval) :
"""
Check whether a number is in the given interval.
"""
lo, hi = interval
return a >= lo and a < hi
| 2.78125 | 3 |
pkg/agents/team4/trainingAgent/getLifeExpectancy.py | SOMAS2021/SOMAS2021 | 13 | 12788548 | import sys
import json
log_file_name = sys.argv[1]
number_of_agents = sys.argv[2]
agent_config_file_name = sys.argv[3]
best_agents_file_name = sys.argv[4]
current_iteration = sys.argv[5]
# reading contents of logfile
log_file = open(log_file_name, 'r')
lines = log_file.readlines()
log_file.close()
# convert log entries to json objects
logs = []
for i in range(len(lines)):
logs.append(json.loads(lines[i]))
# create dict pairing agent types with a list of their lifespans -- {TeamX: [1,2,3..], TeamY: [1,2,3...]}
agents = {}
# {"agentAge":33,"agentID":"506f71ff-0b92-4eb5-8fea-1750cb2d44f5","agentType":"Team5","level":"info","msg":"Agent survives till the end of the simulation","reporter":"simulation","time":"2022-01-03T11:17:44Z"}
# going through every log entry
for i in logs:
try:
# Check when agent dies -- append days lived
if i['msg'] == "Killing agent":
if i['agent_type'] not in agents: # creates new entry if the log does not already exist
agents[i['agent_type']] = []
agents[i['agent_type']].append(i["daysLived"])
# Check when agent survives -- append days lived
elif i['msg'] == "Agent survives till the end of the simulation":
if i['agent_type'] not in agents: # creates new entry if the log does not already exist
agents[i['agent_type']] = []
agents[i['agent_type']].append(i["agentAge"])
except:
continue
avgs = {}
# avg life expectancy of all agents globally
avg_of_all_agents = 0
# avg life expectancy of all agents not incl Team 4
avg_all_other_agents = 0
# number of agents not of Team 4
number_of_other_agents = 0
# go through each agent type
for agent in agents:
# get avg life expectancy per agent type and store
avg = sum(agents[agent])/len(agents[agent])
avgs[agent] = avg
# increase global life exp
avg_of_all_agents += sum(agents[agent])
if agent != "Team4":
# count number of agents not Team 4
number_of_other_agents += len(agents[agent])
# increase global life exp of not team 4
avg_all_other_agents += sum(agents[agent])
avg_of_all_agents /= sum([len(agents[a]) for a in agents])
avg_all_other_agents /= number_of_other_agents
avg_days_lived = avgs["Team4"]
print(str(avg_of_all_agents)+";"+str(avg_days_lived)+";"+str(avg_all_other_agents))
# read best_agent file
best_agent_json_file = open(best_agents_file_name)
best_agent_json = json.load(best_agent_json_file)
best_agent_json_file.close()
try:
# read agent at current_iteration+1
next_agent = best_agent_json[int(current_iteration)]
# print("Changing agent config to: {0}".format(next_agent))
# pass agent to agent_config file to create population for next run
agent_config_file = open(agent_config_file_name, 'w')
agent_config_file.write(json.dumps(next_agent, indent=4))
agent_config_file.close()
except:
pass
| 3 | 3 |
Medium/0018. 4Sum/0018. 4Sum_still_slow.py | FlyProbe/LeetCode-Solutions | 1 | 12788549 | from typing import List
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
l = len(nums)
        res = set()  # use a set to drop duplicate quadruplets
nums.sort()
pre_i = None
for i in range(l-3):
            if nums[i] == pre_i:  # skip duplicate values for the first fixed number
continue
pre_i = nums[i]
pre_j = None
for j in range(i+1, l-2):
if nums[j] == pre_j:
continue
pre_j = nums[j]
m = j+1
n = l-1
while m < n:
temp = nums[i] + nums[j] + nums[m] + nums[n]
# if nums[i] + nums[j] + nums[m] + nums[n] < target:
# m += 1
# elif nums[i] + nums[j] + nums[m] + nums[n] > target:
# n -= 1
                    # recomputing the sum each time is expensive on large lists
if temp < target:
m += 1
elif temp > target:
n -= 1
else:
res.add((nums[i], nums[j], nums[m], nums[n]))
m += 1
n -= 1
sol = []
for item in res:
sol.append(list(item))
return sol
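# Hedged usage sketch (editor addition): the classic LeetCode example; quadruplet order
# depends on set iteration, so the result is sorted before printing.
if __name__ == "__main__":
    print(sorted(Solution().fourSum([1, 0, -1, 0, -2, 2], 0)))
    # expected: [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]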
| 2.828125 | 3 |
packages/lintol/capstone/examples/processors/boundary_checker_impr.py | lintol/capstone | 0 | 12788550 | """Boundary checking (improved)
This function will (hopefully!) find if data in a csv file is contained within Northern Ireland.
If not so, this will be reported back to the user.
For now, please make sure that the second geojson in the argument is a boundary of Northern Ireland.
"""
import shapely.geometry
from geojson_utils import point_in_multipolygon
import logging
import json
from dask.threaded import get
import pandas as p
import geopandas as gp
import csv
import sys
import os
from ltldoorstep.processor import DoorstepProcessor
from ltldoorstep.reports import report
DEFAULT_OUTLINE = 'example/data/osni-ni-outline-lowres.geojson'
def find_ni_data(first_file, rprt, metadata=None):
ni_data = DEFAULT_OUTLINE
if metadata and 'definition' in metadata:
if metadata and \
'configuration' in metadata and \
'boundary' in metadata['configuration'] and \
metadata['configuration']['boundary'].startswith('$->'):
boundary_key = metadata['configuration']['boundary'][3:]
if not 'supplementary' in metadata or boundary_key not in metadata['supplementary']:
raise RuntimeError("Boundary not found in supplementary data")
boundary = metadata['supplementary'][boundary_key]
ni_data = boundary['location']
rprt.add_supplementary('boundary', boundary['source'], 'Boundary against which points are tested')
if not os.path.exists(ni_data):
raise RuntimeError("Boundary not found on filesystem")
with open(first_file) as data_file:
# Setting up data that will be compared to the dataset/file being passed in
data_to_compare = gp.GeoDataFrame.from_features(json.load(data_file)['features'])
rprt.set_properties(preset='geojson', headers=list(data_to_compare.columns))
# If csv file has these attributes then...
if 'geometry' in data_to_compare:
# This is what we are comparing the first csv/json file to - contains data of NI.
ni_compare_data = gp.read_file(ni_data)
# Multipolyon var is set to the first index of ni_compare_data with the key 'geometry'
multipolygon = ni_compare_data.ix[0]['geometry']
# points var is set with data_to_compare with the key 'geometry'
points = data_to_compare['geometry']
# outside_points is set to data that is not in multipolygon - this is values outside NI?
outside_points = data_to_compare[[not multipolygon.contains(p) for p in points]]
# inside_points_ct is set the sum of the length of points minus outside points
inside_points_ct = len(points) - len(outside_points)
# If outside points are not empty then....
if not outside_points.empty:
# Iterating through index and points in outside points
for ix, point in outside_points.iterrows():
geopoint = shapely.geometry.mapping(point['geometry'])
# props is set to a dictonary object of point
props = dict(point)
# removing key 'geometry'
del props['geometry']
# calling Report object method add_issue
rprt.add_issue(
logging.ERROR,
'locations-not-found',
_("This location is not within the given boundary"),
item_index=ix,
item=geopoint,
item_type=geopoint['type'],
item_properties=props
)
# If the file does not have any location data....
else:
rprt.add_issue(
'lintol/boundary-checker-improved:1',
logging.WARNING,
'no-location-data-found',
_("No location data found! Please make sure that you have read the right file")
)
return rprt
class BoundaryCheckerImprovedProcessor(DoorstepProcessor):
@staticmethod
def make_report():
return report.GeoJSONReport("GeoJSON Boundary Processor", "Info from GeoJSON Processor - example info")
def get_workflow(self, filename, metadata={}):
workflow = {
'output': (find_ni_data, filename, self.make_report(), metadata)
}
return workflow
processor = BoundaryCheckerImprovedProcessor
if __name__ == "__main__":
argv = sys.argv
processor = BoundaryCheckerImprovedProcessor()
workflow = processor.get_workflow(argv[1])
    print(get(workflow, 'output'))  # run the dask workflow built above
| 3.125 | 3 |