max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
gpypi/__init__.py | tastuteche/g-pypi-py3 | 1 | 12798951 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pkg_resources
__version__ = pkg_resources.get_distribution('g-pypi').version.replace('dev', '')
| 1.382813 | 1 |
Codes/optimization/src/preprocess/calibration.py | dychen24/magx | 7 | 12798952 | <reponame>dychen24/magx
import numpy as np
from .data_reader import read_data
def calibrate(path):
data = read_data(path)
# cut = int(data.shape[0]/10)
# data = data[cut: -cut]
nsensor = int(data.shape[1] / 3)
offX = np.zeros(nsensor)
offY = np.zeros(nsensor)
offZ = np.zeros(nsensor)
scaleX = np.zeros(nsensor)
scaleY = np.zeros(nsensor)
scaleZ = np.zeros(nsensor)
for i in range(nsensor):
mag = data[:, i * 3:i * 3 + 3]
H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1]
** 2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T
w = mag[:, 0] ** 2
tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T)
X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w)
# print(X.shape)
offX[i] = X[0] / 2
offY[i] = X[1] / (2 * X[3])
offZ[i] = X[2] / (2 * X[4])
temp = X[5] + offX[i] ** 2 + X[3] * offY[i]**2 + X[4] * offZ[i] ** 2
scaleX[i] = np.sqrt(temp)
scaleY[i] = np.sqrt(temp / X[3])
scaleZ[i] = np.sqrt(temp / X[4])
offset = np.stack([offX, offY, offZ], axis=0).T
offset = offset.reshape(1, -1)
scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T
scale = scale.reshape(1, -1)
return [offset, scale]
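# Added usage sketch (illustration only; the CSV path is a placeholder). read_data is
# assumed to return an array of shape (n_samples, 3 * nsensor), as the indexing above implies.
# offset, scale = calibrate("data/mag_readings.csv")
# raw = read_data("data/mag_readings.csv")
# calibrated = (raw - offset) / scale   # offset and scale are (1, 3 * nsensor) and broadcast over all samples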
| 2.34375 | 2 |
my_test/calc.py | irux/test-bump | 0 | 12798953 | def summcalc(a,b):
return a+b
| 1.601563 | 2 |
setup.py | PontusHultkrantz/tcapy | 0 | 12798954 | <filename>setup.py
from setuptools import setup, find_packages
long_description = """tcapy is a transaction cost analysis library for calculating your trading costs"""
setup(name='tcapy',
version='0.1.0',
description='Transaction cost analysis library',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
long_description=long_description,
keywords=['pandas', 'TCA', 'transaction cost analysis'],
url='https://github.com/cuemacro/tcapy',
packages=find_packages(),
include_package_data=True,
install_requires=[],
zip_safe=False)
| 1.398438 | 1 |
gitScrabber/scrabTasks/report/easeOfUseEstimation.py | Eyenseo/gitScrabber | 0 | 12798955 | <reponame>Eyenseo/gitScrabber<filename>gitScrabber/scrabTasks/report/easeOfUseEstimation.py
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import ReportTask
from utils import containedStructure
name = "EaseOfUseEstimation"
version = "1.0.0"
class MissingManualData(Exception):
"""
Helper class to filter specific exceptions
"""
pass
class Limit():
"""
Convenience class to hold the upper and lower limits of the project sizes
:param name: The name
:param lower: The lower
:param upper: The upper
"""
def __init__(self, name, lower, upper):
self.name = name
self.lower = lower
self.upper = upper
class ScoreData():
"""
Convenience class that gathers the necessary data for the ease of use
estimation for the projects.
:param project_report: The project report to gather the data from
"""
def __init__(self, project_report):
self.website = None
self.download = None
self.readme = None
self.apis = None
self.examples = None
self.explanations = None
self.high = None
self.low = None
self.__gather_existence(project_report)
self.__gather_completness(project_report)
self.__gather_interface_level(project_report)
def __gather_existence(self, project_report):
"""
Gathers the documentation existence data
:param project_report: The project report to gather the data from
"""
required = {'generalData': {'documentation': {
'exists': {'readme': False,
'website': False,
'download': False}}}}
if (not containedStructure(required, project_report)):
raise MissingManualData(
"Data for the ease of use calculation is missing - "
"generalData.documentation.exists.*"
)
exists = project_report['generalData']['documentation']['exists']
self.download = exists['download']
self.readme = exists['readme']
self.website = exists['website']
def __gather_completness(self, project_report):
"""
Gathers the documentation completeness data
:param project_report: The project report to gather the data from
"""
required = {'generalData': {'documentation': {
'completeness': {'apis': False,
'examples': False,
'explanations': False}}}}
if (not containedStructure(required, project_report)):
raise MissingManualData(
"Data for the ease of use calculation is missing - "
"generalData.documentation.completeness.*"
)
comp = project_report['generalData']['documentation']['completeness']
self.apis = comp['apis']
self.examples = comp['examples']
self.explanations = comp['explanations']
def __gather_interface_level(self, project_report):
"""
Gathers the projects interface level data
:param project_report: The project report to gather the data from
"""
required = {'generalData': {'interfaceLevel': {'high': False,
'low': False}}}
if (not containedStructure(required, project_report)):
raise MissingManualData(
"Data for the ease of use calculation is missing - "
"generalData.interfaceLevel.*"
)
inter = project_report['generalData']['interfaceLevel']
self.high = inter['high']
self.low = inter['low']
class Weight(object):
"""
Convenience class that holds all weights needed for the ease of use
estimation
"""
def __init__(self):
self.website = 3
self.download = 2
self.readme = 1
self.apis = 3
self.examples = 2
self.explanations = 1
self.high_low = 6
self.high = 5
self.low = 1
class EaseOfUseEstimation(ReportTask):
"""
Class to estimate how easy it is to use a project.
This is based on the following data:
manual:
generalData:
documentation:
exists:
readme: false
website: false
download: false
completeness:
apis: false
examples: false
explanations: false
interfaceLevel:
high: true
low: true
Example:
EaseOfUseEstimation: difficult
:param parameter: Parameter given explicitly for this task, for all
projects, defined in the task.yaml
:param global_args: Arguments that will be passed to all tasks. They
_might_ contain something that is useful for the task,
but the task has to check if it is _there_ as these
are user provided. If they are needed to work that
check should happen in the argHandler.
"""
def __init__(self, parameter, global_args):
super(EaseOfUseEstimation, self).__init__(name, version, parameter,
global_args)
self.__limits = [
Limit(name='easy', upper=24, lower=17),
Limit(name='normal', upper=16, lower=9),
Limit(name='difficult', upper=8, lower=1)
]
self.__weight = Weight()
def __estimate_ease_of_use(self, project_report):
"""
Estimates how easy it is to use a project.
The basic formula is existence * (completeness + level)
:param project_report: The project report containing the data to
base the estimation on
:returns: The ease of use score
"""
data = ScoreData(project_report)
score = (
data.apis*self.__weight.apis
+ data.examples*self.__weight.examples
+ data.explanations*self.__weight.explanations
)
if data.high and data.low:
score += self.__weight.high_low
elif data.high:
score += self.__weight.high
elif data.low:
score += self.__weight.low
if data.website:
score *= self.__weight.website
elif data.download:
score *= self.__weight.download
elif data.readme:
score *= self.__weight.readme
return score
def scrab(self, report):
"""
The scrab task estimates how easy it is to use a project.
:param report: The report to analyse _and_ change
:returns: Report that contains all scrabbed information
Example:
EaseOfUseEstimation: difficult
"""
projects = report['projects']
try:
for project in projects:
project_report = projects[project]
eou_score = self.__estimate_ease_of_use(project_report)
if eou_score is None:
continue
for limit in self.__limits:
if(eou_score <= limit.upper
and eou_score >= limit.lower):
projects[project]['EaseOfUseEstimation'] = limit.name
except Exception as e:
raise Exception(
"While estimating ease of use for the project '{}' with "
"the report\n{}".format(
project,
projects[project])
) from e
return report
| 1.5625 | 2 |
neuro_evolutional_net/genetic_algorithm.py | DominikSpiljak/Fuzzy-Evolutionary-and-Neuro-computing | 0 | 12798956 | from individual import Individual
class GeneticAlgorithm:
def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error):
self.population_generation = population_generation
self.num_iter = num_iter
self.selection = selection
self.combination = combination
self.mutation = mutation
self.solution = solution
self.goal_error = goal_error
def evolution(self, neural_net):
# Template method
population = self.population_generation()
min_error = self.solution(population).error
for i in range(self.num_iter):
population, comb_population = self.selection(population)
combined_population = self.combination(comb_population)
mutated_population = self.mutation(combined_population)
population.extend(mutated_population)
iteration_min_error = self.solution(population).error
if i % 1000 == 0:
if iteration_min_error < min_error:
min_error = iteration_min_error
print("Found new best, iteration = {}, {}".format(
i, self.solution(population)))
if iteration_min_error < self.goal_error:
print("Reached goal error, terminating.")
break
return self.solution(population)
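# Added usage sketch (illustration only): every callable below is a placeholder for a
# user-supplied operator, mirroring the constructor arguments above.
# ga = GeneticAlgorithm(population_generation=make_population, num_iter=100000,
#                       selection=tournament_selection, combination=crossover,
#                       mutation=mutate, solution=best_individual, goal_error=1e-6)
# best = ga.evolution(neural_net)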
| 3.171875 | 3 |
pyanimate/__init__.py | martinmcbride/sympl | 0 | 12798957 | name = "pyanimate"
| 1.0625 | 1 |
server/app/api/lib/notify.py | Lazarus118/virtual-queue | 0 | 12798958 | import twilio
import twilio.rest
class Notify(object):
def __init__(self, account_sid, auth_token, phone_number):
self.account_sid = account_sid
self.auth_token = auth_token
self.phone_number = phone_number
def _request(self, to_number, message):
try:
client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token)
message = client.messages.create(
body=message,
to=to_number,
from_=self.phone_number
)
except twilio.TwilioRestException as e:
print(e)
def send_message(self, to_number, message):
return self._request(to_number, message)
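# Added usage sketch (illustration only; the credentials and phone numbers are placeholders):
# notifier = Notify(account_sid="ACxxxxxxxx", auth_token="xxxx", phone_number="+15550000000")
# notifier.send_message("+15551111111", "Your place in the queue is coming up.")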
| 2.53125 | 3 |
custom_components/fritzbox_tools/binary_sensor.py | jloehr/ha-fritzbox-tools | 0 | 12798959 | <reponame>jloehr/ha-fritzbox-tools
"""AVM Fritz!Box connectivitiy sensor"""
import logging
from collections import defaultdict
import datetime
try:
from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity
except ImportError:
from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = datetime.timedelta(seconds=60)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
_LOGGER.debug("Setting up sensors")
fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id]
if "WANIPConn1" in fritzbox_tools.connection.services:
""" We do not support repeaters at the moment """
async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True)
return True
class FritzBoxConnectivitySensor(BinarySensorEntity):
name = "FRITZ!Box Connectivity"
icon = "mdi:router-wireless"
device_class = "connectivity"
def __init__(self, fritzbox_tools):
self.fritzbox_tools = fritzbox_tools
self.entity_id = ENTITY_ID_FORMAT.format(f"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity")
self._is_on = True # We assume the fritzbox to be online initially
self._is_available = (
True  # set to False if an error happened during the update
)
self._attributes = defaultdict(str)
super().__init__()
@property
def is_on(self) -> bool:
return self._is_on
@property
def unique_id(self):
return f"{self.fritzbox_tools.unique_id}-{self.entity_id}"
@property
def device_info(self):
return self.fritzbox_tools.device_info
@property
def available(self) -> bool:
return self._is_available
@property
def device_state_attributes(self) -> dict:
return self._attributes
async def _async_fetch_update(self):
self._is_on = True
try:
if "WANCommonInterfaceConfig1" in self.fritzbox_tools.connection.services:
connection = lambda: self.fritzbox_tools.connection.call_action("WANCommonInterfaceConfig1", "GetCommonLinkProperties")["NewPhysicalLinkStatus"]
is_up = await self.hass.async_add_executor_job(connection)
self._is_on = is_up == "Up"
else:
self._is_on = await self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected)
self._is_available = True
status = self.fritzbox_tools.fritzstatus
uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, "uptime"))
last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds)
self._attributes["last_reconnect"] = last_reconnect.replace(microsecond=0).isoformat()
for attr in [
"modelname",
"external_ip",
"external_ipv6",
]:
self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr))
except Exception:
_LOGGER.error("Error getting the state from the FRITZ!Box", exc_info=True)
self._is_available = False
async def async_update(self) -> None:
_LOGGER.debug("Updating Connectivity sensor...")
await self._async_fetch_update()
| 2.046875 | 2 |
solutions/python3/667.py | sm2774us/amazon_interview_prep_2021 | 42 | 12798960 | class Solution:
def constructArray(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
left, right, res = 0, n+1, [None]*n
for i in range(n):
if k == 1:
if i%2 == 0:
while i<n: res[i], right, i = right - 1, right - 1, i + 1
else:
while i<n: res[i], left, i = left + 1, left + 1, i + 1
return res
else:
if i%2 != 0: res[i], right = right - 1, right - 1
else: res[i], left = left + 1, left + 1
if i != 0: k -= 1
| 2.96875 | 3 |
mozi/layers/normalization.py | hycis/Mozi | 122 | 12798961 | <gh_stars>100-1000
from mozi.layers.template import Template
from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones
from mozi.weight_init import UniformWeight
import theano.tensor as T
import theano
class BatchNormalization(Template):
def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01):
'''
REFERENCE:
Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
PARAMS:
short_memory: short term memory
y_t is the latest value, the moving average x_tp1 is calculated as
x_tp1 = memory * y_t + (1-memory) * x_t; the larger the short term
memory, the more weight is put on the most recent value.
layer_type: fc or conv
epsilon:
denominator min value for preventing division by zero in computing std
dim: for fc layers, shape is the layer dimension, for conv layers,
shape is the number of feature maps
'''
assert layer_type in ['fc', 'conv']
self.layer_type = layer_type
self.epsilon = 1e-6
self.dim = dim
self.mem = short_memory
if self.layer_type == 'fc':
input_shape = (1, dim)
self.broadcastable = (True, False)
elif self.layer_type == 'conv':
input_shape = (1, dim, 1, 1)
self.broadcastable = (True, False, True, True)
self.gamma = gamma_init(input_shape, name='gamma')
self.beta = shared_zeros(input_shape, name='beta')
self.params = [self.gamma, self.beta]
self.moving_mean = 0
self.moving_var = 1
def _train_fprop(self, state_below):
if self.layer_type == 'fc':
miu = state_below.mean(axis=0)
var = T.mean((state_below - miu)**2, axis=0)
elif self.layer_type == 'conv':
miu = state_below.mean(axis=(0,2,3), keepdims=True)
var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True)
self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean
self.moving_var = self.mem * var + (1-self.mem) * self.moving_var
Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)
gamma = T.patternbroadcast(self.gamma, self.broadcastable)
beta = T.patternbroadcast(self.beta, self.broadcastable)
return gamma * Z + beta
def _test_fprop(self, state_below):
Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)
gamma = T.patternbroadcast(self.gamma, self.broadcastable)
beta = T.patternbroadcast(self.beta, self.broadcastable)
return gamma * Z + beta
def _layer_stats(self, state_below, layer_output):
return [('moving_mean', T.mean(self.moving_mean)),
('moving_std', T.mean(self.moving_var)),
('gamma_mean', T.mean(self.gamma)),
('beta_mean', T.mean(self.beta)),
('gamma_max', T.max(self.gamma))]
# class LRN(Template):
# """
# Adapted from pylearn2
# Local Response Normalization
# """
#
# def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2):
# super(LRN, self).__init__()
# self.n = n
# self.alpha = alpha
# self.beta = beta
# self.k = k
# assert self.n % 2 == 1, 'only odd n is supported'
#
# def _train_fprop(self, state_below):
# half = self.n / 2
# sq = T.sqr(state_below)
# b, ch, r, c = state_below.shape
# extra_channels = T.alloc(0., b, ch + 2*half, r, c)
# sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq)
# scale = self.k
#
# for i in xrange(self.n):
# scale += self.alpha * sq[:,i:i+ch,:,:]
#
# scale = scale ** self.beta
# return state_below / scale
#
# def _test_fprop(self, state_below):
# return self._train_fprop(state_below)
| 2.6875 | 3 |
task1/learning.py | EgorOrachyov/MachineLearning | 0 | 12798962 | from csv import reader
from sklearn import preprocessing
from plotly import graph_objects
def import_data(path):
return [[float(f) for f in r] for r in reader(open(path, "r"))]
def normalize_data(dataset):
scaler = preprocessing.MinMaxScaler(feature_range=(0,1))
normalized = scaler.fit_transform(dataset)
return normalized.tolist()
def cross_validation_split(dataset, cross_validation_k):
size = len(dataset)
splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)]
splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1])
sets = list()
offset = 0
for s in splits:
sets.append([dataset[i] for i in range(offset, offset + s)])
offset += s
return sets
def compute_y(model, row):
m = len(row)
y = 0.0
for i in range(m):
y += row[i] * model[i]
return y + model[m]
def compute_grad_mse(model, row, actual):
m = len(row)
grad = [0.0 for _ in range(m + 1)]
diff = compute_y(model, row) - actual
for i in range(m):
grad[i] += 2.0 * row[i] * diff
grad[m] += 2.0 * diff
return grad
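# Added note (illustration only): for a single sample with prediction
# y = model[0]*row[0] + ... + model[m-1]*row[m-1] + model[m], the squared error
# (y - actual)**2 has gradient d/d model[i] = 2 * row[i] * (y - actual) and
# d/d model[m] = 2 * (y - actual), which is exactly what compute_grad_mse accumulates above.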
def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count):
m = len(fold[0])
model = [0.0 for _ in range(m + 1)]
for i in range(iterations_count):
for j in range(len(fold)):
row = fold[j]
actual = actuals[j]
grad = compute_grad_mse(model, row, actual)
model = [model[k] - learning_rate / (1 + i) * grad[k] for k in range(m + 1)]
return model
def compute_rmse(prediction, actual):
mse = 0.0
n = len(prediction)
for i in range(n):
mse += ((prediction[i] - actual[i]) ** 2) / float(n)
return mse ** 0.5
def compute_r2(prediction, actual):
nominator = 0.0
denominator = 0.0
expect = 0.0
for i in range(len(actual)):
nominator += (actual[i] - prediction[i]) ** 2
for i in range(len(actual)):
expect += actual[i] / float(len(actual))
for i in range(len(actual)):
denominator += (actual[i] - expect) ** 2
return 1 - float(nominator) / float(denominator)
def run_learning(sets, learning_rate, iterations_count):
models = []
rmses = []
r2s = []
rmses_test = []
r2s_test = []
for set in sets:
fold = list(sets)
fold.remove(set)
fold = sum(fold, [])
test = set
fold_actual = [r[-1] for r in fold]
fold = [r[0:-1] for r in fold]
test_actual = [r[-1] for r in test]
test = [r[0:-1] for r in test]
model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count)
fold_pred = [compute_y(model, row) for row in fold]
test_pred = [compute_y(model, row) for row in test]
rmses.append(compute_rmse(fold_pred, fold_actual))
r2s.append(compute_r2(fold_pred, fold_actual))
rmses_test.append(compute_rmse(test_pred, test_actual))
r2s_test.append(compute_r2(test_pred, test_actual))
models.append(model)
return models, rmses, r2s, rmses_test, r2s_test
def compute_stat(data):
n = len(data)
expectation = 0.0
for d in data:
expectation += d / float(n)
sd = 0.0
for d in data:
sd += ((d - expectation) ** 2) / float(n)
return expectation, sd ** 0.5
filepath = "features_var_1.csv"
cv_k = 5
learn_rate = 0.01
iterations = 50
dataset = normalize_data(import_data(filepath))
sets = cross_validation_split(dataset, cv_k)
models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations)
models_tr = [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))]
stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr]
values = ["X"] + ["Fold" + str(i) for i in range(cv_k)] + ["E","SD"]
cells = [ ["RMSE (train)", "R2 (train)", "RMSE (test)", "R2 (test)"] + ["f" + str(i) for i in range(len(dataset[0]))] ] + \
[ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] + \
[ [stats[j][i] for j in range(len(stats))] for i in range(2) ]
table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells))
figure = graph_objects.Figure(data=[table])
figure.show()
| 2.90625 | 3 |
Week 02/summationerror_BSW.py | bswood9321/PHYS-3210 | 0 | 12798963 | <reponame>bswood9321/PHYS-3210
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as plt
N = int(input("Pick a number: "))
n, a, sum1, m, b, sum2 = 1, 0, 0, N, 0, 0
while (n<=N):
a = 1/n
sum1+=a
n = n+1
while (m>=1):
b = 1/m
sum2+=b
m = m-1
error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2))
print("For N = :",N,", the error is: ",error)
N1=1
Ns=[]
Errors=[]
while N1<=N:
n1, a1, sum11, m1, b1, sum21 = 1, 0, 0, N1, 0, 0
while (n1<=N1):
a1 = 1/n1
sum11+=a1
n1 = n1+1
while (m1>=1):
b1 = 1/m1
sum21+=b1
m1 = m1-1
Ns.append(N1)
error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21))
Errors.append(error1)
N1=N1+1
plt.plot(Ns,Errors,'r-')
plt.xlabel("N")
plt.ylabel("Error(x10^-16)")
plt.title("N vs. Error")
plt.show()
print("The down summation is more precise because the error of the number is proportional to the number you are summing, therefore as the number decreases, there is less error, versus starting with a small number, and adding more error for every summation.")
| 3.453125 | 3 |
imix/models/vqa_models/lxmert/lxmert_task.py | linxi1158/iMIX | 0 | 12798964 | <gh_stars>0
from imix.models.builder import VQA_MODELS
import torch
import copy
'''
from transformers.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
# BertLayerNorm,
BertPreTrainedModel,
)
'''
from .lxmert import LXMERTForPretraining
from imix.models.vqa_models.base_model import BaseModel
from .lxmert_qa_answer_table import load_lxmert_qa
import json
from .lxmert import ClassificationModel
@VQA_MODELS.register_module()
class LXMERT(BaseModel):
def __init__(self, **kwargs):
super().__init__()
args = kwargs['params']
freeze_base = args['freeze_base']
training_head_type = args['training_head_type']
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if training_head_type == 'pretraining':
self.model = LXMERTForPretraining(**args)
self.forward_train = self.forward_train_pretrain
else:
self.model = ClassificationModel(**args)
pretrained_path = args['pretrained_path']
if pretrained_path is not None:
if training_head_type in ['vqa2', 'gqa']:
self.label2ans = json.load(open(args.label2ans_path))
load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans)
elif training_head_type == 'nlvr2':
self.model.lxrt_encoder.load(pretrained_path)
if freeze_base:
for p in self.model.bert.parameters():
p.requires_grad = False
def forward_train(self, data, **kwargs):
# ques_id = data['ques_id'].to(self.device)
feats = data['feats'].to(self.device)
boxes = data['boxes'].to(self.device)
sent = data['ques']
target = data['target'].to(self.device)
output_dict = self.model(feats, boxes, sent)
model_output = {
'scores': output_dict['scores'],
'target': target,
}
return model_output
def forward_test(self, data):
model_output = self.forward_train(data)
return model_output
def forward_train_pretrain(self, data):
params = copy.deepcopy(data)
if params.get('feats') is not None and params.get('image_dim') is not None:
image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1]))
if len(params['image_dim'].size()) < len(image_mask.size()):
params['image_dim'] = data['image_dim'].unsqueeze(-1)
assert len(params['image_dim'].size()) == len(image_mask.size())
image_mask = image_mask < params['image_dim']
params['visual_attention_mask'] = image_mask.long()
else:
params['visual_attention_mask'] = None
output_dict = self.model(
input_ids=params['input_ids'].cuda(),
token_type_ids=params['segment_ids'].cuda(),
attention_mask=params['input_mask'].cuda(),
visual_feats=params['feats'].cuda(),
visual_pos=params['pos'].cuda(),
visual_attention_mask=params['visual_attention_mask'].cuda()
if params['visual_attention_mask'] is not None else params['visual_attention_mask'],
)
target_dict = {
'masked_lm_labels': params['lm_label_ids'].cuda(),
'matched_label': params['is_matched'].cuda(),
'ans': params['ans'].cuda(),
'obj_labels': {
'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()),
'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()),
'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()),
}
}
model_output = {'scores': output_dict, 'target': target_dict}
return model_output
| 2.171875 | 2 |
srun.py | qaqcxh/lectureHack | 0 | 12798965 | #!/bin/python3
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import sys,os
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
browser = webdriver.Chrome(options=options)
browser.get("http://172.16.31.10/")
try:
browser.find_element_by_xpath('//*[@id="logout"]')
print('Network already connected!')
browser.quit()
sys.exit()
except NoSuchElementException:
pass
username_='2020xxxxxxxxxxx'
password_='<PASSWORD>'
# Enter username and password
username = browser.find_element_by_xpath('//*[@id="username"]')
password = browser.find_element_by_xpath('//*[@id="password"]')
username.clear()
username.send_keys(username_)
password.clear()
password.send_keys(password_)
login_btn = browser.find_element_by_xpath('//*[@id="login-account"]')
login_btn.click()
try:
# Keep polling until the page shows that the connection succeeded
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="logout"]'))
)
print("网络已连接!")
finally:
browser.quit()
browser.quit()
| 2.609375 | 3 |
users/migrations/0005_auto_20190911_2212.py | nhy17-thu/ImageProcessingWebsite | 1 | 12798966 | # Generated by Django 2.2.4 on 2019-09-11 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20190907_1334'),
]
operations = [
migrations.AddField(
model_name='pic',
name='classification152',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='pic',
name='classification18',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='pic',
name='transfer',
field=models.ImageField(blank=True, upload_to=''),
),
]
| 1.515625 | 2 |
project_2/scripts/graph.py | jsbruglie/adrc | 0 | 12798967 | <gh_stars>0
import prio_dict
class Graph:
def __init__(self):
self.vertices = {}
def __str__(self):
return str([key for key in self.vertices.keys()])
def __iter__(self):
return iter(self.vertices.values())
def add_vertex(self, node):
self.vertices[node] = {}
def get_vertex(self, node):
if node in self.vertices:
return self.vertices[node]
else:
return None
def get_vertices(self):
return self.vertices.keys()
def add_edge(self, src, dst, weight=0):
if src not in self.vertices:
self.add_vertex(src)
if dst not in self.vertices:
self.add_vertex(dst)
self.vertices[src][dst] = weight
def find_path(self, src, dst, path=None):
if path is None:
path = []
graph = self.vertices
path = path + [src]
if src == dst:
return path
if src not in graph:
return None
for v in graph[src]:
if v not in path:
temp = self.find_path(v, dst, path)
if temp:
return temp
return None
def find_all_paths(self, src, dst, path=None):
if path is None:
path = []
graph = self.vertices
path = path + [src]
if src == dst:
return [path]
if src not in graph:
return []
paths = []
for v in graph[src]:
if v not in path:
temp = self.find_all_paths(v, dst, path)
for p in temp:
paths.append(p)
return paths
def find_shortest_path(self, src, dst, path=None):
if path is None:
path = []
graph = self.vertices
path = path + [src]
if src == dst:
return path
if src not in graph:
return None
shortest_path = None
for v in graph[src]:
if v not in path:
temp = self.find_shortest_path(v, dst, path)
if temp:
if not shortest_path or len(temp) < len(shortest_path):
shortest_path = temp
return shortest_path
def is_connected(self, visited=None, src=None):
if visited is None:
visited = set()
graph = self.vertices
vertices = list(graph.keys())
if src is None:
src = vertices[0]
visited.add(src)
if len(visited) != len(vertices):
for v in graph[src]:
if v not in visited:
if self.is_connected(visited, v):
return True
else:
return True
return False
def Dijkstra(self, G, src, dst=None):
D = {}
P = {}
Q = prio_dict.PriorityDictionary()
Q[src] = 0
for v in Q:
D[v] = Q[v]
if v == dst:
break
for w in G[v]:
vw_length = D[v] + G[v][w]
if w in D:
if vw_length < D[w]:
raise ValueError
elif w not in Q or vw_length < Q[w]:
Q[w] = vw_length
P[w] = v
return (D,P)
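    # Added note (illustration only): for a graph built with add_edge('a', 'b', 7) etc.,
    # Dijkstra returns D, the final distance of each visited vertex from src, and P,
    # the predecessor map that shortest_path() below walks backwards to rebuild the path.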
def shortest_path(self, G, src, dst):
D,P = self.Dijkstra(G, src, dst)
path = []
while True:
path.append(dst)
if dst == src:
break
dst = P[dst]
path.reverse()
return path
| 3.015625 | 3 |
algorithms/289. Game of Life.py | woozway/py3-leetcode | 1 | 12798968 | <reponame>woozway/py3-leetcode<gh_stars>1-10
"""
1. Clarification
2. Possible solutions
- Simulation v1
- Simulation optimised v2
3. Coding
4. Tests
"""
# T=O(m*n), S=O(m*n)
class Solution:
def gameOfLife(self, board: List[List[int]]) -> None:
neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]
rows = len(board)
cols = len(board[0])
copy_board = [[board[row][col] for col in range(cols)] for row in range(rows)]
for row in range(rows):
for col in range(cols):
live_neighbors = 0
for neighbor in neighbors:
r = (row + neighbor[0])
c = (col + neighbor[1])
if (rows > r >= 0) and (cols > c >= 0) and (copy_board[r][c] == 1):
live_neighbors += 1
if copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):
board[row][col] = 0
if copy_board[row][col] == 0 and live_neighbors == 3:
board[row][col] = 1
# T=O(m*n), S=O(1)
class Solution:
    def gameOfLife(self, board: List[List[int]]) -> None:
        neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]
        rows = len(board)
        cols = len(board[0])
        # Encode transitions in place instead of copying the board:
        # 2 means "was live, becomes dead", 3 means "was dead, becomes live",
        # so a cell was originally live iff its value is 1 or 2.
        for row in range(rows):
            for col in range(cols):
                live_neighbors = 0
                for neighbor in neighbors:
                    r = (row + neighbor[0])
                    c = (col + neighbor[1])
                    if (rows > r >= 0) and (cols > c >= 0) and (board[r][c] in (1, 2)):
                        live_neighbors += 1
                if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):
                    board[row][col] = 2
                if board[row][col] == 0 and live_neighbors == 3:
                    board[row][col] = 3
        # Second pass: resolve the encoded values to the final 0/1 states.
        for row in range(rows):
            for col in range(cols):
                if board[row][col] == 2:
                    board[row][col] = 0
                elif board[row][col] == 3:
                    board[row][col] = 1
| 3.1875 | 3 |
src/dbxdeploy/package/PackageDependencyLoader.py | Kukuksumusu/dbx-deploy | 2 | 12798969 | <reponame>Kukuksumusu/dbx-deploy
import tomlkit
from tomlkit.items import Table
from typing import List
from pathlib import Path
from dbxdeploy.package.Dependency import Dependency
from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter
from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator
from dbxdeploy.package.RequirementsConfig import RequirementsConfig
class PackageDependencyLoader:
def __init__(
self,
requirements_line_converter: RequirementsLineConverter,
requirements_generator: RequirementsGenerator,
):
self.__requirements_generator = requirements_generator
self.__requirements_line_converter = requirements_line_converter
def load(self, project_base_dir: Path) -> List[Dependency]:
poetry_lock_path = project_base_dir.joinpath("poetry.lock")
poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path)
main_dependencies = self.__load_main_dependencies()
dependencies = []
for dependency in main_dependencies:
dependency_name = dependency[0]
dependencies.append(
Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name))
)
return dependencies
def __load_main_dependencies(self) -> list:
requirements_config = RequirementsConfig()
requirements_config.exclude_index_info()
requirements_txt = self.__requirements_generator.generate(requirements_config)
return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines()))
def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]:
with lockfile_path.open("r") as f:
config = tomlkit.parse(f.read())
return [package for package in config["package"] if package["category"] == "main" and package["name"]]
def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str:
for dependency in dependencies:
if dependency["name"].lower() == dependency_name.lower():
return dependency["version"]
raise Exception(f"Dependency {dependency_name} not found in poetry.lock")
| 1.945313 | 2 |
open_spiel/python/mfg/algorithms/EGTA/egta.py | wyz2368/open_spiel | 0 | 12798970 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The trainer of EGTA for mean field game."""
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms.EGTA import meta_strategies
from open_spiel.python.mfg.algorithms.EGTA import inner_loop
from open_spiel.python.mfg.algorithms.EGTA import init_oracle
class MFGMetaTrainer(object):
"""
Empirical game-theoretic analysis (EGTA) for MFGs.
"""
def __init__(self,
mfg_game,
oracle_type,
num_inner_iters=None,
initial_policy=None,
meta_strategy_method="nash",
**kwargs):
"""
Initialize the MFG Trainer.
:param mfg_game: a mean-field game.
:param oracle_type: "BR" exact best response or "DQN" RL approximate best response.
:param num_inner_iters: the number of iterations for the inner loop (finding BR target based on the empirical game) if needed.
:param initial_policies: initial policies. Uniform policies by default.
:param meta_strategy_method: method for the inner loop.
"""
self._mfg_game = mfg_game
self._oracle_type = oracle_type
self._num_players = mfg_game.num_players()
self._num_inner_iters = num_inner_iters
self._initial_policy = initial_policy
self._meta_strategy_method = meta_strategy_method
self.initialize_policies_and_distributions()
#TODO: check if policy and dist are being updated.
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game,
policies=self._policies,
distributions=self._distributions,
num_iterations=num_inner_iters)
self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)
self._output_policy = None
self._current_outer_iter = 0
def initialize_policies_and_distributions(self):
"""
Initialize policies and corresponding distributions.
"""
if self._oracle_type == "BR":
self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game,
initial_policy=self._initial_policy)
elif self._oracle_type == "DQN":
raise NotImplementedError
else:
raise ValueError("Suggested oracle has not been implemented.")
def reset(self):
"""
Reset the trainer.
"""
self._current_outer_iter = 0
self.initialize_policies_and_distributions()
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game,
policies=self._policies,
distributions=self._distributions,
num_iterations=self._num_inner_iters)
self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)
self._output_policy = None
def iteration(self):
"""
Main training iteration.
"""
self._current_outer_iter += 1
self._meta_strategy_method.reset()
self._output_policy = self._inner_loop.run_inner_loop()
self.update_policies(self._output_policy)
def final_step(self):
""" Final analysis of all generated policies. """
self._meta_strategy_method.reset()
self._output_policy = self._inner_loop.run_inner_loop()
def update_policies(self, output_merged_policy):
"""
Adding new best-response policies to the empirical game.
:param output_merged_policy: a merged policy induced by inner loop.
:return:
"""
output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy)
greedy_pi = self._oracle(self._mfg_game, output_distribution)
self._policies.append(greedy_pi)
self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi))
def get_original_policies_and_weights(self):
"""
Return original policies in the empirical game and corresponding output mixed strategies.
"""
weights = self._meta_strategy_method.get_weights_on_orig_policies()
return self._policies, weights
def get_merged_policy(self):
"""
Return the output merged policy.
Equivalent to merge policies and weights from get_original_policies_and_weights().
"""
return self._output_policy
def get_policies(self):
return self._policies
def get_distrbutions(self):
return self._distributions
| 2.3125 | 2 |
OpenGLCffi/GLX/EXT/SGI/swap_control.py | cydenix/OpenGLCffi | 0 | 12798971 | <reponame>cydenix/OpenGLCffi<filename>OpenGLCffi/GLX/EXT/SGI/swap_control.py
from OpenGLCffi.GLX import params
@params(api='glx', prms=['interval'])
def glXSwapIntervalSGI(interval):
pass
| 1.609375 | 2 |
snsim/io_utils.py | bcarreres/snsim | 5 | 12798972 | <reponame>bcarreres/snsim
"""This module contains io stuff."""
import os
import warnings
import pickle
import pandas as pd
import numpy as np
try:
import json
import pyarrow as pa
import pyarrow.parquet as pq
json_pyarrow = True
except ImportError:
json_pyarrow = False
from . import salt_utils as salt_ut
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def write_sim(wpath, name, formats, header, data):
"""Write simulated lcs.
Parameters
----------
wpath : str
The path where to write file.
name : str
Simulation name.
formats : np.array(str)
List of file formats to write.
header : dict
The simulation header.
data : pandas.DataFrame
Dataframe containing lcs.
Returns
-------
None
Just write files.
"""
# Export lcs as pickle
if 'pkl' in formats:
with open(wpath + name + '.pkl', 'wb') as file:
pkl_dic = {'name': name,
'lcs': data.to_dict(),
'meta': data.attrs,
'header': header}
pickle.dump(pkl_dic, file)
if 'parquet' in formats and json_pyarrow:
lcs = pa.Table.from_pandas(data)
lcmeta = json.dumps(data.attrs, cls=NpEncoder)
header = json.dumps(header, cls=NpEncoder)
meta = {'name'.encode(): name.encode(),
'attrs'.encode(): lcmeta.encode(),
'header'.encode(): header.encode()}
lcs = lcs.replace_schema_metadata(meta)
pq.write_table(lcs, wpath + name + '.parquet')
elif 'parquet' in formats and not json_pyarrow:
warnings.warn('You need pyarrow and json modules to use .parquet format', UserWarning)
def read_sim_file(file_path):
"""Read a sim file.
Parameters
----------
file_path : str
Path of the file.
Returns
-------
str, dict, pandas.DataFrame
The name, the header and the lcs of the simulation.
"""
file_path, file_ext = os.path.splitext(file_path)
if file_ext == '.pkl':
with open(file_path + file_ext, 'rb') as f:
pkl_dic = pickle.load(f)
lcs = pd.DataFrame.from_dict(pkl_dic['lcs'])
lcs.index.set_names(['ID', 'epochs'], inplace=True)
lcs.attrs = pkl_dic['meta']
name = pkl_dic['name']
header = pkl_dic['header']
elif file_ext == '.parquet':
if json_pyarrow:
table = pq.read_table(file_path + file_ext)
lcs = table.to_pandas()
lcs.set_index(['ID', 'epochs'], inplace=True)
lcs.attrs = {int(k): val
for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()}
name = table.schema.metadata['name'.encode()].decode()
header = json.loads(table.schema.metadata['header'.encode()])
else:
warnings.warn("You need pyarrow and json module to write parquet formats", UserWarning)
return name, header, lcs
def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}):
"""Write fit into a fits file.
Parameters
----------
sim_lcs_meta : dict{list}
Meta data of all lightcurves.
fit_res : list(sncosmo.utils.Result)
List of sncosmo fit results for each lightcurve.
directory : str
Destination of write file.
sim_meta : dict
General simulation meta data.
Returns
-------
None
Just write a file.
"""
data = sim_lcs_meta.copy()
fit_keys = ['t0', 'e_t0',
'chi2', 'ndof']
MName = sim_meta['model_name']
if MName[:5] in ('salt2', 'salt3'):
fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1',
'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c',
'cov_mb_x1', 'cov_mb_c', 'cov_x1_c']
for k in fit_keys:
data[k] = []
for obj_ID in fit_res:
fd = fit_res[obj_ID]['params']
snc_out = fit_res[obj_ID]['snc_out']
if snc_out != 'NaN':
data['t0'].append(fd['t0'])
data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0]))
if MName[:5] in ('salt2', 'salt3'):
par_cov = snc_out['covariance'][1:, 1:]
mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov)
data['x0'].append(fd['x0'])
data['e_x0'].append(np.sqrt(par_cov[0, 0]))
data['mb'].append(fd['mb'])
data['e_mb'].append(np.sqrt(mb_cov[0, 0]))
data['x1'].append(fd['x1'])
data['e_x1'].append(np.sqrt(par_cov[1, 1]))
data['c'].append(fd['c'])
data['e_c'].append(np.sqrt(par_cov[2, 2]))
data['cov_x0_x1'].append(par_cov[0, 1])
data['cov_x0_c'].append(par_cov[0, 2])
data['cov_x1_c'].append(par_cov[1, 2])
data['cov_mb_x1'].append(mb_cov[0, 1])
data['cov_mb_c'].append(mb_cov[0, 2])
data['chi2'].append(snc_out['chisq'])
data['ndof'].append(snc_out['ndof'])
else:
for k in fit_keys:
data[k].append(np.nan)
for k, v in sim_lcs_meta.items():
data[k] = v
df = pd.DataFrame(data)
table = pa.Table.from_pandas(df)
header = json.dumps(sim_meta, cls=NpEncoder)
table = table.replace_schema_metadata({'header'.encode(): header.encode()})
pq.write_table(table, directory + '.parquet')
print(f"Fit result output file : {directory}.parquet")
def open_fit(file):
"""USe to open fit file.
Parameters
----------
file : str
Fit results parquet file
Returns
-------
pandas.DataFrame
The fit results.
"""
table = pq.read_table(file)
fit = table.to_pandas()
fit.attrs = json.loads(table.schema.metadata['header'.encode()])
fit.set_index(['ID'], inplace=True)
return fit
| 2.15625 | 2 |
submission_json.py | selva604/DA_TBN | 1 | 12798973 | <filename>submission_json.py
import json
from pathlib import Path
import numpy as np
import pandas as pd
from epic_kitchens.meta import training_labels, test_timestamps
def softmax(x):
'''
>>> res = softmax(np.array([0, 200, 10]))
>>> np.sum(res)
1.0
>>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001)
True
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]]))
>>> np.sum(res, axis=1)
array([ 1., 1., 1.])
>>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]]))
>>> np.sum(res, axis=1)
array([ 1., 1.])
'''
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
def top_scores(scores):
top_n_scores_idx = np.argsort(scores)[:, ::-1]
top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx]
return top_n_scores_idx, top_n_scores
def compute_action_scores(verb_scores, noun_scores, n=100):
top_verbs, top_verb_scores = top_scores(verb_scores)
top_nouns, top_noun_scores = top_scores(noun_scores)
top_verb_probs = softmax(top_verb_scores)
top_noun_probs = softmax(top_noun_scores)
action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :]
instance_count = action_probs_matrix.shape[0]
action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1]
verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n],
dims=(action_probs_matrix.shape[1:]))
# TODO: Reshape, argsort, then convert back to verb/noun indices
segments = np.arange(0, instance_count).reshape(-1, 1)
return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]),
action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]])
def action_scores_to_json(actions, scores, prior):
entries = []
for verbs, nouns, segment_scores in zip(*actions, scores):
if prior is None:
entries.append({"{},{}".format(verb, noun): float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)})
else:
entries.append({"{},{}".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0) * float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)})
return entries
def scores_to_json(scores):
entries = []
for classes, segment_scores in zip(*top_scores(scores)):
entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)})
return entries
def compute_score_dicts(results, prior):
verb_scores = results['scores']['verb']
if len(verb_scores.shape) == 4:
verb_scores = verb_scores.mean(axis=(1, 2))
noun_scores = results['scores']['noun']
if len(noun_scores.shape) == 4:
noun_scores = noun_scores.mean(axis=(1, 2))
actions, action_scores = compute_action_scores(verb_scores, noun_scores)
verb_scores_dict = scores_to_json(verb_scores)
noun_scores_dict = scores_to_json(noun_scores)
action_scores_dict = action_scores_to_json(actions, action_scores, prior)
return verb_scores_dict, noun_scores_dict, action_scores_dict
def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict):
entries = {}
for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids,
verb_scores_dict,
noun_scores_dict,
action_scores_dict):
entries[str(uid)] = {
'verb': segment_verb_scores_dict,
'noun': segment_noun_scores_dict,
'action': segment_action_scores_dict
}
return {
'version': '0.1',
'challenge': 'action_recognition',
'results': entries,
}
def dump_scores_to_json(results, uids, filepath, prior):
verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior)
results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict)
filepath.parent.mkdir(exist_ok=True, parents=True)
with open(filepath, 'w', encoding='utf8') as f:
json.dump(results_dict, f)
return results_dict
def main(args):
if not args.submission_json.exists():
args.submission_json.mkdir(parents=True, exist_ok=True)
for test_set in ['seen', 'unseen']:
if test_set == 'unseen':
action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts()
prior_action = action_counts.div(action_counts.sum())
prior = prior_action
else:
prior = None
results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl'))
uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int)
timestamps = test_timestamps(test_set)
for i, (idx, row) in enumerate(timestamps.iterrows()):
uids[i] = str(idx)
dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Produce submission JSON from results pickle",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("results_dir", type=Path)
parser.add_argument("submission_json", type=Path)
main(parser.parse_args())
| 2.484375 | 2 |
AWS_Services/showPlacementCompany.py | demirkirans/PlaDat-BLG411E | 0 | 12798974 | <gh_stars>0
import json
import boto3
import time
import decimal
from boto3.dynamodb.conditions import Key,Attr
from botocore.exceptions import ClientError
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
# wanted a simple yield str(o) in the next line,
# but that would mean a yield on the line with super(...),
# which wouldn't work (see my comment below), so...
return (str(o) for o in [o])
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2')
def lambda_handler(event, context):
queryStringParameters = event['queryStringParameters']
#get company id
Company_ID = int(queryStringParameters['id'])
### GET PLACEMENTS ###
table = dynamodb.Table('Jobs')
# scan for placements posted by the given company id
response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID))
responseBody = { }
placements = []
if response['Count'] == 0:
#no matching item
print("No item was found :/")
return {
"statusCode": 500,
'body': json.dumps('Internal server error - no matching placements found in the Jobs table')
}
else:
#extract id
for item in response['Items']:
Placement_ID = item['ID']
#we got placement id
placement = {}
##################### Add attributes from Jobs TAble ##########################
placement['Company_ID'] = int(item['Company_ID'])
placement['Placement_ID'] = int(item['ID'])
placement['Company_name'] = item['Company_name']
placement['title'] = item['title']
placement['description'] = item['description']
placement['category'] = item['category']
placement['contact_info'] = item['contact_info']
placement['department_name'] = item['department_name']
placement['degree'] = item['degree']
placement['location'] = item['location']
placement['salaries'] = int(item['salaries'])
##################### Add attributes from Skills_jobs table #########################
table = dynamodb.Table('Skills_jobs')
skills = []
db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))
if db_response['Count'] == 0:
return {
"statusCode": 500,
"body": json.dumps('Internal Server Error - No matching skills for placement')
}
else:
for attribute in db_response['Items']:
skills.append(attribute['name'])
#add skills to placement
placement['skills'] = skills
#################### Add attributes from Benefits table #################
table = dynamodb.Table('Benefits')
benefits = []
db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))
if db_response['Count'] == 0:
return {
"statusCode": 500,
"body": json.dumps('Internal Server Error - No matching benefits for placement')
}
else:
for attribute in db_response['Items']:
benefits.append(attribute['name'])
#add skills to placement
placement['benefits'] = benefits
#################### Add attributes from Certificates_job table #################
table = dynamodb.Table('Certificates_job')
certificates = []
db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))
if db_response['Count'] == 0:
print("No matching certificates for placement")
else:
for attribute in db_response['Items']:
certificates.append(attribute['name'])
#add skills to placement
placement['certificates'] = certificates
############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT #################
placements.append(placement)
responseBody['placements'] = placements
return {
"statusCode": 200,
"body": json.dumps(responseBody)
}
| 2.203125 | 2 |
BackEnd/FaceDetector/FaceExt.py | RickyYXY/The-Mask | 2 | 12798975 | # encoding:utf-8
import os, sys
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(basepath, 'FaceDetector'))
import requests
import base64
import cv2
import numpy as np
import urllib.request
import base64
def fetchImageFromHttp(image_url, timeout_s=1):
# This function reads an image from a URL
if image_url:
resp = urllib.request.urlopen(image_url, timeout=timeout_s)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
else:
return []
def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120):
# This function extracts the faces detected in an image
message = {}
# client_id is the API key (AK) obtained from the official site, client_secret is the secret key (SK)
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>'
response = requests.get(host)
if response:
# print(response.json()['access_token'])
access_token = response.json()['access_token']
request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
if imgtype == 'Local':
with open(img, "rb") as f: # 转为二进制格式
base64_data = base64.b64encode(f.read()) # 使用base64进行加密
base64_data = base64_data.decode()
params = "\"image\":\"{}\",\"image_type\":\"BASE64\", \"max_face_num\":\"120\"".format(base64_data)
params = '{' + params + '}'
elif imgtype == 'url':
params = "\"image\":\"{}\",\"image_type\":\"URL\", \"max_face_num\":\"120\"".format(img)
params = '{' + params + '}'
# print(params['image'])
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/json'}
response = requests.post(request_url, data=params, headers=headers)
if response:
print (response.json())
# extract the info of all detected faces
if response.json()['error_code'] != 0:
message['Error Code'] = response.json()['error_code']
message['Error Message'] = response.json()['error_msg']
message['Data'] = None
return message
# raise Exception('Face detection failed, error code: {}, error message: {}'.format(response.json()['error_code'], response.json()['error_msg']))
face_number = response.json()['result']['face_num']
face_List = []
for num in range(face_number):
face_loc_left = int(response.json()['result']['face_list'][num]['location']['left'])
face_loc_top = int(response.json()['result']['face_list'][num]['location']['top'])
face_loc_width = int(response.json()['result']['face_list'][num]['location']['width'])
face_loc_height = int(response.json()['result']['face_list'][num]['location']['height'])
face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height])
# read the image and draw bounding boxes
if imgtype == 'Local':
image = cv2.imread(img)
elif imgtype == 'url':
image = fetchImageFromHttp(img)
# starting index for numbering the cropped faces
search_all_path = []
num = 0
for pos in face_List:
lefttopx = pos[0]
lefttopy = pos[1]
rightbottomx = lefttopx + pos[2]
rightbottomy = lefttopy + pos[3]
# print(lefttopx, lefttopy, rightbottomx, rightbottomy)
cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2)
if imgpos == 'Example':
savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg')
elif imgpos == 'Search':
pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)])
savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg')
search_all_path.append(savepath)
# cv2.imwrite("C:/WorkSpace/test/detect_face_"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx])
cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx])
num += 1
message['Error Code'] = response.json()['error_code']
message['Error Message'] = message['Error Message'] = response.json()['error_msg']
if imgpos == 'Example':
full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg')
cv2.imwrite(full_face_path, image)
message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path}
elif imgpos == 'Search':
# full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg')
# cv2.imwrite(full_face_path, image)
message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None}
return message
if __name__ == "__main__":
# imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg'
imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2'
wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg'
fetchImageFromHttp(wycpath)
# result = FaceExtract(imgpath, 'url')
# result = FaceExtract(imgpath, 'Local', 'Search')
# cv2.imshow('image', result)
# cv2.waitKey(0)
| 2.578125 | 3 |
courses/bxx-python/b01/s08-compound-interest.py | vinaynk/nooromtech_tutorials | 0 | 12798976 | #
# compound interest
#
# P - principal
# r - annual rate (%)
# n - compounding frequency
# y - number of years
#
# Py = P0 * (1 + (r/100)/n) ** (n*y)    (r is given in percent)
#
P = 1000
r = 2.5 # %
n = 4 # quarterly
y = 5
amount = P
print('Amount (Starting):', amount)
for year in range(1, y+1):
for period in range(1, n+1):
amount *= (1 + r/100/n) # r/100 since r is in %
print('Amount ( year:', year, ', period:', period, ') :', round(amount, 2))
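
# Quick closed-form check (not in the original): the loop above should agree with
# the formula amount = P * (1 + (r/100)/n) ** (n*y).
closed_form = P * (1 + r/100/n) ** (n * y)
print('Amount (closed form):', round(closed_form, 2))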
| 3.53125 | 4 |
ecomstore/caching.py | Pythonian/ecomstore | 0 | 12798977 | <filename>ecomstore/caching.py
from django.core.cache import cache
from .settings import CACHE_TIMEOUT
def cache_update(sender, **kwargs):
""" signal for updating a model instance in the cache;
any Model class using this signal must
have a uniquely identifying 'cache_key' property.
"""
item = kwargs.get('instance')
cache.set(item.cache_key, item, CACHE_TIMEOUT)
def cache_evict(sender, **kwargs):
""" signal for updating a model instance in the cache;
any Model class using this signal must
have a uniquely identifying 'cache_key' property.
"""
item = kwargs.get('instance')
cache.delete(item.cache_key)
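

# Example wiring (illustrative, not part of the original module). These handlers are
# meant to be connected to Django model signals; `Product` below is a hypothetical
# model that defines a `cache_key` property.
#
# from django.db.models.signals import post_save, post_delete
# post_save.connect(cache_update, sender=Product)
# post_delete.connect(cache_evict, sender=Product)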
| 2.359375 | 2 |
openmaptiles/pgutils.py | ldgeo/openmaptiles-tools | 0 | 12798978 | <reponame>ldgeo/openmaptiles-tools<filename>openmaptiles/pgutils.py
import re
from typing import Tuple, Dict
from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection
from openmaptiles.perfutils import COLOR
async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]:
postgis_version = False
results = {}
def parse_postgis_ver(value) -> None:
nonlocal postgis_version
m = re.match(r'POSTGIS="(\d+\.\d+)', value)
postgis_version = float(m.group(1))
settings = {
'version()': None,
'postgis_full_version()': parse_postgis_ver,
'jit': lambda
v: 'disable JIT in PG 11-12 for complex queries' if v != 'off' else '',
'shared_buffers': None,
'work_mem': None,
'maintenance_work_mem': None,
'max_connections': None,
'max_worker_processes': None,
'max_parallel_workers': None,
'max_parallel_workers_per_gather': None,
}
key_len = max((len(v) for v in settings))
for setting, validator in settings.items():
q = f"{'SELECT' if '(' in setting else 'SHOW'} {setting};"
prefix = ''
suffix = ''
try:
res = await conn.fetchval(q)
if validator:
msg = validator(res)
if msg:
prefix, suffix = COLOR.RED, f" {msg}{COLOR.RESET}"
except (UndefinedFunctionError, UndefinedObjectError) as ex:
res = ex.message
prefix, suffix = COLOR.RED, COLOR.RESET
print(f"* {setting:{key_len}} = {prefix}{res}{suffix}")
results[setting] = res
return results, postgis_version
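

# Minimal usage sketch (not part of the original module); the DSN is supplied by the
# caller and nothing here runs on import.
async def _show_settings_demo(dsn: str) -> None:
    import asyncpg
    conn = await asyncpg.connect(dsn)
    try:
        settings, postgis_ver = await show_settings(conn)
        print('PostGIS version:', postgis_ver)
    finally:
        await conn.close()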
| 1.96875 | 2 |
bluesky_widgets/models/_tests/test_image.py | bsobhani/bluesky-widgets | 0 | 12798979 | <gh_stars>0
from bluesky_live.run_builder import build_simple_run
import numpy
from ..plot_builders import Images
from ..plot_specs import AxesSpec, FigureSpec
from ...headless.figures import HeadlessFigure
def test_image():
"Test Images with a 2D array."
run = build_simple_run({"ccd": numpy.random.random((11, 13))})
model = Images("ccd")
view = HeadlessFigure(model.figure)
assert not model.figure.axes[0].images
model.add_run(run)
assert model.figure.axes[0].images
view.close()
def test_image_reduction():
"Test Images with higher-dimensional arrays."
dims = (5, 7, 11, 13, 17, 19)
for i in range(3, len(dims)):
run = build_simple_run({"ccd": numpy.random.random(dims[:i])})
model = Images("ccd")
view = HeadlessFigure(model.figure)
model.add_run(run)
view.close()
def test_properties():
"Touch various accessors"
run = build_simple_run({"ccd": numpy.random.random((11, 13))})
model = Images("c * ccd", namespace={"c": 3})
view = HeadlessFigure(model.figure)
model.add_run(run)
assert model.runs[0] is run
assert model.field == "c * ccd"
assert dict(model.namespace) == {"c": 3}
assert model.needs_streams == ("primary",)
view.close()
def test_figure_set_after_instantiation():
axes = AxesSpec()
model = Images("ccd", axes=axes)
assert model.figure is None
figure = FigureSpec((axes,), title="")
assert model.figure is figure
view = HeadlessFigure(model.figure)
view.close()
| 2.234375 | 2 |
hw3/Fast_Map.py | tian-yu/INF552 | 0 | 12798980 | #
# INF 552 Homework 3
# Part 2: Fast Map
# Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac)
# Date: 2/27/2018
# Programming Language: Python 3.6
#
import numpy as np
import matplotlib.pyplot as plt
DIMENSION = 2
DATA_SIZE = 10
# WORDS = ["acting", "activist", "compute", "coward","forward","interaction","activity","odor","order","international"]
WORDS = []
data_file_name = "fastmap-data.txt"
words_file_name = 'fastmap-wordlist.txt'
table = np.zeros(shape=(DATA_SIZE, DATA_SIZE))
cood = np.zeros(shape=(DATA_SIZE, DIMENSION))
pivot = []
def main():
readFile(data_file_name)
print("\nOriginal table:")
readWords(words_file_name)
print(WORDS)
printTable()
for i in range(DIMENSION):
print("\n\nThe {i}st cood: ".format(i=i+1))
pickLongestPair()
calculateCoordinate(i)
print("\nUpdate table: ")
updateTable(i)
printTable()
plotResult()
def readFile(filename):
with open(filename, "r") as file:
print("Original input:")
for line in file:
line_array = line.split()
print(line_array)
table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \
table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2])
def readWords(filename):
global WORDS
with open(filename) as file:
WORDS = file.read().splitlines()
def printTable():
for row in table:
print(row)
def pickLongestPair():
max = np.amax(table)
indices = list(zip(*np.where(table == max)))
print("The longest distance pair is {pair}".format(pair = indices[0]))
print("Pivot is piont {piv}".format(piv = indices[0][0]))
pivot.append(indices[0])
def calculateCoordinate(dimen):
a = pivot[dimen][0]
b = pivot[dimen][1]
print("The coordinate table")
for i in range(len(table)):
cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b])
print ("{i}\t({x}, {y})".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3)))
def updateTable(dimen):
for i in range(0, DATA_SIZE):
for j in range(0, DATA_SIZE):
table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2))
def plotResult():
x = cood[:, 0]
y = cood[:, 1]
fig, ax = plt.subplots()
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.scatter(x, y)
plt.scatter(x, y, color="red", s=30)
plt.title("Fast Map Result")
for i, txt in enumerate(WORDS):
ax.annotate(txt, (x[i], y[i]))
plt.show()
main()
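
# Worked example of the projection step above (illustrative only, not used by main()):
# for objects a, b, i with d(a,b)=10, d(a,i)=6 and d(i,b)=8, the first FastMap
# coordinate of i is x_i = (6**2 + 10**2 - 8**2) / (2*10) = 3.6.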
| 3.40625 | 3 |
waves/WAVtoMIF.py | willzhang05/fpga-synthesizer | 3 | 12798981 | <filename>waves/WAVtoMIF.py<gh_stars>1-10
#!/usr/bin/python
import wave, sys
if len(sys.argv) < 2:
    print("Missing args.")
    print("Expected: input")
    sys.exit(1)
waveReader = wave.open(sys.argv[1], 'rb')
depth = waveReader.getnframes()
width = 16 # wav is 16 bits
radix = "HEX" # address and data in hex
print("DEPTH =", str(depth)+";")
print("WIDTH =", str(width)+";")
print("ADDRESS_RADIX =", radix+";")
print("DATA_RADIX =", radix+";")
print("CONTENT")
print("BEGIN\n")
for i in range(waveReader.getnframes()):
frameAddr = "%X" % i
frameValue = waveReader.readframes(1)
hexFrame = "%02X%02X" % (frameValue[1], frameValue[0])
print(frameAddr, ":", hexFrame+";")
print("\nEND;")
| 3.125 | 3 |
algorithms/python/leetcode/UniquePaths.py | ytjia/coding-pratice | 0 | 12798982 | <filename>algorithms/python/leetcode/UniquePaths.py
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
"""
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach
the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
https://leetcode.com/problems/unique-paths/description/
"""
class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# C^m_m+n
num = 1
den = 1
for i in range(m + n - 2, n - 1, -1):
num *= i
for i in range(m - 1, 0, -1):
den *= i
        return num // den  # integer division: the binomial coefficient C(m+n-2, m-1) is an integer
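

# Quick check (illustrative): a 3 x 7 grid has C(3+7-2, 3-1) = C(8, 2) = 28 unique paths.
if __name__ == "__main__":
    print(Solution().uniquePaths(3, 7))  # expected: 28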
| 3.84375 | 4 |
datascience/sympy/evaluating2.py | janbodnar/Python-Course | 13 | 12798983 | <reponame>janbodnar/Python-Course
#!/usr/bin/python
import numpy
from sympy import Symbol, lambdify, sin, pprint
a = numpy.arange(10)
x = Symbol('x')
expr = sin(x)
f = lambdify(x, expr, "numpy")
pprint(f(a))
| 3.09375 | 3 |
app/backend-test/core_datasets/run05_test_directory_size_calculation.py | SummaLabs/DLS | 32 | 12798984 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import glob
from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize
if __name__ == '__main__':
path='../../../data/datasets'
for ii,pp in enumerate(glob.glob('%s/*' % path)):
tbn=os.path.basename(pp)
tsize = getDirectorySizeInBytes(pp)
tsizeHuman = humanReadableSize(tsize)
print ('[%d] %s : %s (%d)' % (ii, tbn, tsizeHuman, tsize)) | 2.34375 | 2 |
tests/cli/_trivial.py | pcarranzav2/pyuavcan | 0 | 12798985 | #
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
#
import pytest
import subprocess
from ._subprocess import run_cli_tool
def _unittest_trivial() -> None:
run_cli_tool('show-transport', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool(timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool('invalid-command', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError):
run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0)
with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout
run_cli_tool('pub', 'nonexistent.data.Type.1.0', '{}', '--tr=Loopback(None)', timeout=5.0)
| 1.867188 | 2 |
insomnia/agents/d4pg_learner.py | takeru1205/Insomnia | 0 | 12798986 | import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import queue
from insomnia.utils import empty_torch_queue
from insomnia.explores.gaussian_noise import GaussianActionNoise
from insomnia.numeric_models import d4pg
from insomnia.numeric_models.misc import l2_projection
class LearnerD4PG(object):
"""Policy and value network update routine. """
def __init__(self, policy_net, target_policy_net, learner_w_queue,
alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51):
self.v_min = v_min
self.v_max = v_max
self.num_atoms = n_atoms
self.num_train_steps = 10000
self.batch_size = 256
self.tau = 0.001
self.gamma = 0.998
self.prioritized_replay = 0
self.learner_w_queue = learner_w_queue
self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Noise process
self.noise = GaussianActionNoise(mu=np.zeros(n_actions))
# Value and policy nets
self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.policy_net = policy_net
self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,
self.v_min, self.v_max, self.num_atoms)
self.target_policy_net = target_policy_net
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(param.data)
self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta)
self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha)
self.value_criterion = nn.BCELoss(reduction='none')
def _update_step(self, batch, replay_priority_queue, update_step):
update_time = time.time()
state, action, reward, next_state, done, gamma, weights, inds = batch
state = np.asarray(state)
action = np.asarray(action)
reward = np.asarray(reward)
next_state = np.asarray(next_state)
done = np.asarray(done)
weights = np.asarray(weights)
inds = np.asarray(inds).flatten()
state = torch.from_numpy(state).float().to(self.device)
next_state = torch.from_numpy(next_state).float().to(self.device)
action = torch.from_numpy(action).float().to(self.device)
reward = torch.from_numpy(reward).float().to(self.device)
done = torch.from_numpy(done).float().to(self.device)
# ------- Update critic -------
# Predict next actions with target policy network
next_action = self.target_policy_net(next_state)
# Predict Z distribution with target value network
target_value = self.target_value_net.get_probs(next_state, next_action.detach())
# Get projected distribution
target_z_projected = l2_projection._l2_project(next_distr_v=target_value,
rewards_v=reward,
dones_mask_t=done,
gamma=self.gamma ** 5,
n_atoms=self.num_atoms,
v_min=self.v_min,
v_max=self.v_max,
delta_z=self.delta_z)
target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device)
critic_value = self.value_net.get_probs(state, action)
critic_value = critic_value.to(self.device)
value_loss = self.value_criterion(critic_value, target_z_projected)
value_loss = value_loss.mean(axis=1)
# Update priorities in buffer
td_error = value_loss.cpu().detach().numpy().flatten()
priority_epsilon = 1e-4
if self.prioritized_replay:
weights_update = np.abs(td_error) + priority_epsilon
replay_priority_queue.put((inds, weights_update))
value_loss = value_loss * torch.tensor(weights).float().to(self.device)
# Update step
value_loss = value_loss.mean()
self.value_optimizer.zero_grad()
value_loss.backward()
self.value_optimizer.step()
# -------- Update actor -----------
policy_loss = self.value_net.get_probs(state, self.policy_net(state))
policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device)
policy_loss = torch.sum(policy_loss, dim=1)
policy_loss = -policy_loss.mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - self.tau) + param.data * self.tau
)
# Send updated learner to the queue
if update_step.value % 100 == 0:
try:
params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()]
self.learner_w_queue.put(params)
            except:  # the queue may be full or closed; skip broadcasting weights this time
                pass
def run(self, training_on, batch_queue, replay_priority_queue, update_step):
while update_step.value < self.num_train_steps:
try:
batch = batch_queue.get_nowait()
except queue.Empty:
continue
self._update_step(batch, replay_priority_queue, update_step)
update_step.value += 1
if update_step.value % 1000 == 0:
print("Training step ", update_step.value)
training_on.value = 0
empty_torch_queue(self.learner_w_queue)
empty_torch_queue(replay_priority_queue)
print("Exit learner.")
| 2.0625 | 2 |
OPCDataTransfer/OPC/OPC.py | Shanginre/OPCDataTransfer | 1 | 12798987 | <reponame>Shanginre/OPCDataTransfer<filename>OPCDataTransfer/OPC/OPC.py
#!/usr/bin/env python3.6
# -*- coding: UTF-8 -*-
import OpenOPC
import pywintypes
import datetime
from builtins import print
import json
import copy
import logging
import os
class ConnectionOPC:
def __init__(self, conf_settings):
self._debug = None
self._logger = None
self._verbose = None
self._frequency = None
self._client = None
self._param_list = None
self._dict_codes_plotting_names = None
self._dict_opc_names_codes = None
self._dict_code_keys_opc_names = None
self._parameters_name_string = None
self._debug = conf_settings['debug']
self._set_logger(conf_settings)
self._verbose = conf_settings['verbose']
self._set_frequency(conf_settings)
self._set_opc_client(conf_settings['opc_server'])
# get a list of all parameter names from the OPC server
self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True)
# get dictionaries of tag codes and their OPC names
tags_settings_dicts = self._get_settings_dicts(conf_settings)
self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names'])
self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes']
self._set_dict_code_keys_opc_names()
self._set_parameters_name_string()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
groups = self._client.groups()
self._client.remove(copy.deepcopy(list(groups)))
self._client.close()
        self._print('OPC client closed the connection')
def write(self, list_data_string):
try:
self._client.write(list_data_string)
except OpenOPC.TimeoutError:
self._print("Timeout error OPC occured")
def _print(self, message):
if self._verbose:
print(message)
if self._debug:
self._logger.info(message)
def get_list_of_current_values(self):
current_date_string = datetime.datetime.now()
param_array = list()
try:
if not self._client.groups():
                # Read the values once and build the OPC tag group that will be reused afterwards
for name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1):
param_array.append(self._get_dict_from_opc_data(name, value, current_date_string))
else:
for name, value, quality, timeRecord in self._client.iread(group='Group0', update=1):
param_array.append(self._get_dict_from_opc_data(name, value, current_date_string))
if self._debug or self._verbose:
self._print('Data has been read from the OPC')
for item in param_array:
self._print(item)
except OpenOPC.TimeoutError:
self._print("OPC TimeoutError occured")
return param_array
def convert_simulation_data_to_opc_data(self, current_values_list):
list_opc_values = list()
for value_dict in current_values_list:
cur_time = value_dict.pop('time', None)
cur_value = value_dict.pop('value', None)
opc_tag_name = self._get_opc_tag_name(value_dict)
list_opc_values.append((opc_tag_name, cur_value))
if self._debug or self._verbose:
self._print((opc_tag_name, cur_value, cur_time))
return list_opc_values
def _set_opc_client(self, opc_server_name):
pywintypes.datetime = pywintypes.TimeType
self._client = OpenOPC.client()
self._client.connect(opc_server_name)
self._print('connected to OPC server ' + opc_server_name)
def _set_logger(self, conf_settings):
if self._debug:
logs_file_path = conf_settings['logs_file_path']
if not logs_file_path:
logs_file_path = os.path.abspath(
os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log')))
debug_level_string = conf_settings['debug_level']
if debug_level_string:
debug_level = logging.getLevelName(debug_level_string)
else:
debug_level = logging.DEBUG
logging.basicConfig(level=debug_level,
format='%(asctime)s %(name)s %(levelname)s:%(message)s',
filename=logs_file_path)
self._logger = logging.getLogger(__name__)
def _set_frequency(self, conf_settings):
frequency = conf_settings['frequency']
if frequency is None:
self._frequency = 5
self._print('data refresh rate is set by default ' + str(self._frequency))
else:
self._frequency = frequency
def get_frequency(self):
return self._frequency
@staticmethod
def _get_settings_dicts(conf_settings):
# TODO in production, preferably an HTTP request
tags_settings_file_path = conf_settings['tags_settings_file_path']
if not tags_settings_file_path:
tags_settings_file_path = os.path.abspath(
os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/tags_settings_sample.json')))
with open(tags_settings_file_path, 'r') as read_file:
tags_settings_dicts = json.load(read_file)
return tags_settings_dicts
def _set_dict_codes_plotting_names(self, dict_codes_plotting_names):
dict_with_tuple_keys = dict()
for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items():
for code_plotting_name_dict in list_codes_plotting_names:
dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value']
self._dict_codes_plotting_names = dict_with_tuple_keys
def _set_dict_code_keys_opc_names(self):
dict_with_tuple_keys = dict()
for opc_name, codes_dict in self._dict_opc_names_codes.items():
dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] = opc_name
self._dict_code_keys_opc_names = dict_with_tuple_keys
def get_codes_plotting_names_dict(self):
return self._dict_codes_plotting_names
def get_opc_names_codes_dict(self):
return self._dict_opc_names_codes
def _get_dict_from_opc_data(self, parameter_name, value, current_date_string):
dict_param_value = {**self._dict_opc_names_codes.get(parameter_name),
'value': value,
'time': current_date_string}
return dict_param_value
@staticmethod
def _get_sorted_tuple_values_from_dict(_dict):
values_list = list()
for k in sorted(_dict.keys()):
values_list.append(_dict[k])
return tuple(values_list)
def _get_opc_tag_name(self, value_dict):
keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict)
return self._dict_code_keys_opc_names.get(keys_tuple)
def _set_parameters_name_string(self):
if self._dict_opc_names_codes:
dict_codes_first_value = next(iter(self._dict_opc_names_codes.values()))
self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time'
else:
self._parameters_name_string = ''
def get_parameters_name_string(self):
return self._parameters_name_string
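

# Usage sketch (not part of the original module). The keys below mirror the
# conf_settings fields read in __init__; the server name and tag branch are
# placeholders and must match your own OPC server.
#
# conf = {
#     'debug': False, 'debug_level': None, 'logs_file_path': None,
#     'verbose': True, 'frequency': 5,
#     'opc_server': 'Matrikon.OPC.Simulation.1',
#     'tags_branch_opc_server': 'Simulation Items',
#     'tags_settings_file_path': None,
# }
# with ConnectionOPC(conf) as opc:
#     values = opc.get_list_of_current_values()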
| 2.09375 | 2 |
test/test_si7021.py | arkjedrz/si7021 | 0 | 12798988 | <gh_stars>0
import unittest
import si7021
class Si7021SensorTests(unittest.TestCase):
def setUp(self):
# TODO: checks for Windows, but should check for embedded platform
import os
        if os.name == 'nt':  # os.name reports 'nt' on Windows, never 'Windows'
raise OSError
self._sensor = si7021.Si7021Sensor()
def _assert_in_range(self, action, param_name, min, max):
value = action()
self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal to {min}.')
self.assertLessEqual(value, max, f'{param_name} must be less or equal to {max}.')
def test_read_humidity(self):
self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80)
def test_read_temperature(self):
self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85)
if __name__ == '__main__':
unittest.main() | 3.09375 | 3 |
dlcli/api/user.py | outlyerapp/dlcli | 1 | 12798989 | import logging
from wrapper import *
logger = logging.getLogger(__name__)
# noinspection PyUnusedLocal
def get_user(url='', key='', timeout=60, **kwargs):
return get(url + '/user', headers={'Authorization': "Bearer " + key}, timeout=timeout).json()
# noinspection PyUnusedLocal
def get_user_tokens(url='', key='', timeout=60, **kwargs):
return get(url + '/user/tokens',
headers={'Authorization': "Bearer " + key}, timeout=timeout).json()
# noinspection PyUnusedLocal
def create_user_token(url='', key='', token_name='', timeout=60, **kwargs):
return post(url + '/user/tokens',
headers={'Authorization': "Bearer " + key},
data={'name': token_name}, timeout=timeout).json()
# noinspection PyUnusedLocal
def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs):
return delete(url + '/user/tokens/' + token_name,
headers={'Authorization': "Bearer " + key}, timeout=timeout)
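

# Usage sketch (illustrative only; the URL and API key are placeholders):
# user = get_user(url='https://api.example.com/v1', key='YOUR_API_KEY')
# tokens = get_user_tokens(url='https://api.example.com/v1', key='YOUR_API_KEY')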
| 2.484375 | 2 |
galaxy/objectstore/lwr.py | jmchilton/lwr | 1 | 12798990 | from __future__ import absolute_import # Need to import lwr_client absolutely.
from ..objectstore import ObjectStore
try:
from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager
except ImportError:
from lwr.lwr_client.manager import ObjectStoreClientManager
class LwrObjectStore(ObjectStore):
"""
Object store implementation that delegates to a remote LWR server.
    This may be more aspirational than practical for now; it would be good to get
    Galaxy to a point where a handler thread could be set up that doesn't attempt
    to access the disk files returned by a (this) object store - just passing
them along to the LWR unmodified. That modification - along with this
implementation and LWR job destinations would then allow Galaxy to fully
manage jobs on remote servers with completely different mount points.
This implementation should be considered beta and may be dropped from
Galaxy at some future point or significantly modified.
"""
def __init__(self, config, config_xml):
self.lwr_client = self.__build_lwr_client(config_xml)
def exists(self, obj, **kwds):
return self.lwr_client.exists(**self.__build_kwds(obj, **kwds))
def file_ready(self, obj, **kwds):
return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds))
def create(self, obj, **kwds):
return self.lwr_client.create(**self.__build_kwds(obj, **kwds))
def empty(self, obj, **kwds):
return self.lwr_client.empty(**self.__build_kwds(obj, **kwds))
def size(self, obj, **kwds):
return self.lwr_client.size(**self.__build_kwds(obj, **kwds))
def delete(self, obj, **kwds):
return self.lwr_client.delete(**self.__build_kwds(obj, **kwds))
# TODO: Optimize get_data.
def get_data(self, obj, **kwds):
return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds))
def get_filename(self, obj, **kwds):
return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds))
def update_from_file(self, obj, **kwds):
return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds))
def get_store_usage_percent(self):
return self.lwr_client.get_store_usage_percent()
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
return None
def __build_kwds(self, obj, **kwds):
kwds['object_id'] = obj.id
        return kwds
def __build_lwr_client(self, config_xml):
url = config_xml.get("url")
private_token = config_xml.get("private_token", None)
transport = config_xml.get("transport", None)
manager_options = dict(transport=transport)
client_options = dict(url=url, private_token=private_token)
lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options)
return lwr_client
def shutdown(self):
pass
| 2.359375 | 2 |
Algorithm/String/242. Valid Anagram.py | smsubham/Data-Structure-Algorithms-Questions | 0 | 12798991 | #https://leetcode.com/problems/valid-anagram/
#https://leetcode.com/problems/valid-anagram/discuss/66499/Python-solutions-(sort-and-dictionary).
class Solution(object):
    def isAnagram1(self, s, t):
        dic1, dic2 = {}, {}
        for item in s:
            dic1[item] = dic1.get(item, 0) + 1
        for item in t:
            dic2[item] = dic2.get(item, 0) + 1
        return dic1 == dic2

    def isAnagram2(self, s, t):
        dic1, dic2 = [0]*26, [0]*26
        for item in s:
            dic1[ord(item)-ord('a')] += 1
        for item in t:
            dic2[ord(item)-ord('a')] += 1
        return dic1 == dic2

    def isAnagram3(self, s, t):
        return sorted(s) == sorted(t) | 3.90625 | 4 |
pyshtools/constant/Mars.py | dilkins/SHTOOLS | 1 | 12798992 | <filename>pyshtools/constant/Mars.py
"""
pyshtools constants for the planet Mars.
Each object is an astropy Constant that possesses the attributes name, value,
unit, uncertainty, and reference.
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import numpy as _np
from astropy.constants import Constant as _Constant
from astropy.constants import G as _G
gm_mars = _Constant(
abbrev='gm_mars',
name='Gravitational constant times the mass of Mars',
value=0.4282837581575610e+14,
unit='m3 / s2',
uncertainty=0.18167460e+6,
reference='<NAME>., <NAME>, <NAME> (2016). '
'An improved JPL Mars gravity field and orientation from Mars orbiter '
'and lander tracking data, Icarus, 274, 253-260, '
'doi:10.1016/j.icarus.2016.02.052')
mass_mars = _Constant(
abbrev='mass_mars',
name='Mass of Mars',
value=gm_mars.value / _G.value,
unit='kg',
uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 +
(gm_mars.value * _G.uncertainty / _G.value**2)**2
),
reference='Derived from gm_mars and G.')
r_mars = _Constant(
abbrev='r_mars',
name='Mean radius of Mars',
value=3389.500e3,
unit='m',
uncertainty=0.0,
reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and '
'topography of the terrestrial planets. In <NAME> & <NAME> '
'(Eds.), Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). '
'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.')
density_mars = _Constant(
abbrev='density_mars',
name='Mean density of Mars',
value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3),
unit='kg / m3',
uncertainty=_np.sqrt((3 * mass_mars.uncertainty /
(_np.pi * 4 * r_mars.value**3))**2
+ (3 * 3 * mass_mars.value *
r_mars.uncertainty /
(_np.pi * 4 * r_mars.value**4))**2
),
reference='Derived from mass_mars and r_mars.')
g0_mars = _Constant(
abbrev='g0_mars',
name='Mean surface gravity of Mars at mean planetary radius, '
'ignoring rotation and tides',
value=gm_mars.value / r_mars.value**2,
unit='m / s2',
uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2
+ (2 * gm_mars.value * r_mars.uncertainty
/ r_mars.value**3)**2
),
reference='Derived from gm_mars and r_mars.')
omega_mars = _Constant(
abbrev='omega_mars',
name='Angular spin rate of Mars',
value=350.891985307 * 2 * _np.pi / 360 / (24 * 60 * 60),
unit='rad / s',
uncertainty=0.000000003 * 2 * _np.pi / 360 / (24 * 60 * 60),
reference='<NAME>., <NAME>, <NAME> (2016). '
'An improved JPL Mars gravity field and orientation from Mars orbiter '
'and lander tracking data, Icarus, 274, 253-260, '
'doi:10.1016/j.icarus.2016.02.052')
a_mars = _Constant(
abbrev='a_mars',
name='Semimajor axis of the Mars reference ellipsoid',
value=3395428.0,
unit='m',
uncertainty=19.0,
reference='<NAME>, <NAME>, and <NAME> (2010). '
'A new reference equipotential surface, and reference ellipsoid for '
'the planet Mars. Earth, Moon, and Planets, 106, 1-13, '
'doi:10.1007/s11038-009-9342-7.')
b_mars = _Constant(
abbrev='b_mars',
name='Semiminor axis of the Mars reference ellipsoid',
value=3377678.0,
unit='m',
uncertainty=19.0,
reference='<NAME>, <NAME>, and <NAME> (2010). '
'A new reference equipotential surface, and reference ellipsoid for '
'the planet Mars. Earth, Moon, and Planets, 106, 1-13, '
'doi:10.1007/s11038-009-9342-7.')
f_mars = _Constant(
abbrev='f_mars',
name='Flattening of the Mars reference ellipsoid',
value=(a_mars.value - b_mars.value) / a_mars.value,
unit='',
uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value)
/ a_mars.value**2)**2
+ (_np.sqrt(a_mars.uncertainty**2 +
b_mars.uncertainty**2) / a_mars.value)**2
),
reference='<NAME>, <NAME>, and <NAME> (2010). '
'A new reference equipotential surface, and reference ellipsoid for '
'the planet Mars. Earth, Moon, and Planets, 106, 1-13, '
'doi:10.1007/s11038-009-9342-7.')
u0_mars = _Constant(
abbrev='u0_mars',
name='Theoretical normal gravity potential of the reference ellipsoid',
value=12654875.0,
unit='m2 / s2',
uncertainty=69.0,
reference='<NAME>, <NAME>, and <NAME> (2010). '
'A new reference equipotential surface, and reference ellipsoid for '
'the planet Mars. Earth, Moon, and Planets, 106, 1-13, '
'doi:10.1007/s11038-009-9342-7.')
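

# Example access (illustrative): each object above is an astropy Constant, so its
# value, uncertainty, unit and reference are available as attributes, e.g.
#   print(gm_mars.value, gm_mars.unit, gm_mars.uncertainty)
#   print(density_mars.reference)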
| 2.40625 | 2 |
hard-gists/3791168/snippet.py | jjhenkel/dockerizeme | 21 | 12798993 | """
Demo of json_required decorator for API input validation/error handling
"""
import inspect
import functools
import json
from traceback import format_exception
from flask import jsonify, request
import sys
from flask.exceptions import JSONBadRequest
from flask import Flask
import re
app = Flask(__name__)
def api_error_response(code=404, message="Requested resource was not found", errors=list()):
"""
Convenience function for returning a JSON response that includes
appropriate error messages and code.
"""
response = jsonify(dict(code=code, message=message, errors=errors, success=False))
response.status_code = code
return response
def bad_json_error_response():
"""
Convenience function for returning an error message related to
malformed/missing JSON data.
"""
return api_error_response(code=400,
message="There was a problem parsing the supplied JSON data. Please send valid JSON.")
def json_required(func=None, required_fields={}, validations=[]):
"""
Decorator used to validate JSON input to an API request
"""
if func is None:
return functools.partial(json_required, required_fields=required_fields, validations=validations)
@functools.wraps(func)
def decorated_function(*args, **kwargs):
try:
#If no JSON was supplied (or it didn't parse correctly)
try:
if request.json is None:
return bad_json_error_response()
except JSONBadRequest:
return bad_json_error_response()
#Check for specific fields
errors = []
def check_required_fields(data, fields):
for field, requirements in fields.iteritems():
nested_fields = type(requirements) == dict
if data.get(field) in (None, ''):
if nested_fields:
error_msg = requirements.get('message')
else:
error_msg = requirements
errors.append({'field': field, 'message': error_msg})
elif nested_fields:
check_required_fields(data[field], requirements.get('fields', {}))
check_required_fields(request.json, required_fields)
for validation_field, validation_message, validation_func in validations:
func_args = inspect.getargspec(validation_func).args
func_params = []
for arg in func_args:
func_params.append(request.json.get(arg))
if not validation_func(*func_params):
errors.append({'field': validation_field, 'message': validation_message})
if errors:
return api_error_response(code=422, message="JSON Validation Failed", errors=errors)
except Exception:
#For internal use, nice to have the traceback in the API response for debugging
#Probably don't want to include for public APIs
etype, value, tb = sys.exc_info()
error_info = ''.join(format_exception(etype, value, tb))
return api_error_response(code=500, message="Internal Error validating API input", errors=[{'message':error_info}])
return func(*args, **kwargs)
return decorated_function
EMAIL_REGEX = re.compile(r"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?")
def verify_account_available(email):
"""
Check to see if this email is already registered
"""
#Run a query, use an ORM, use Twilio to call someone and ask them :-)
return True
def valid_date_of_birth(date_of_birth):
"""
Does the supplied date string meet our criteria for a date of birth
"""
#Do whatever you need to do...
return True
@app.route("/do/something", methods=['POST'])
@json_required(
required_fields={
'first_name':"Please provide your first name.",
'last_name':"Please provide your last name.",
'email':'Please specify a valid email address',
'date_of_birth':'Please provide your date of birth'
},
validations=[
('email', 'Please provide a valid email address', lambda email: email is not None and EMAIL_REGEX.match(email)),
('email', "This email is already in use. Please try a different email address.", verify_account_available),
('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth)
]
)
def do_something_useful():
#Confidently use the data in request.json...
return jsonify(dict(status='OK'))
if __name__ == "__main__":
with app.test_client() as client:
response = client.post(
'/do/something',
data=json.dumps({ "first_name": "Brian",
"last_name": "Corbin",
"email": "<EMAIL>",
"date_of_birth": "01/01/1970" }),
follow_redirects=True,
content_type='application/json')
response_dict = json.loads(response.data)
assert response_dict['status'] == 'OK'
response = client.post(
'/do/something',
data=json.dumps({ "last_name": "Corbin",
"email": "<EMAIL>",
"date_of_birth": "01/01/1970" }),
follow_redirects=True,
content_type='application/json')
response_dict = json.loads(response.data)
assert response.status_code == 422
assert response_dict['code'] == 422
assert response_dict['message'] == "JSON Validation Failed"
assert len(response_dict['errors']) == 1
assert response_dict['errors'][0]['field'] == 'first_name'
assert response_dict['errors'][0]['message'] == 'Please provide your first name.'
| 3.453125 | 3 |
ACME/render/camera_extrinsics.py | mauriziokovacic/ACME | 3 | 12798994 | import torch
from ..math.cross import *
from ..math.normvec import *
class CameraExtrinsic(object):
"""
A class representing the camera extrinsic properties
Attributes
----------
position : Tensor
the camera position
target : Tensor
the camera target
up_vector : Tensor
the camera up vector
device : str or torch.device
the device to store the tensors to
Methods
-------
look_at(target)
sets the camera target
look_from(position)
sets the camera position
direction()
returns the camera direction
view_matrix()
returns the current view matrix
to(**kwargs)
changes extrinsic dtype and/or device
"""
def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'):
"""
Parameters
----------
position : list or tuple (optional)
the camera position (default is (0, 0, 0))
target : list or tuple (optional)
the camera target (default is (0, 0, 1))
up_vector : list or tuple (optional)
the camera up vector (default is (0, 1, 0))
device : str or torch.device (optional)
the device to store the tensors to (default is 'cuda:0')
"""
self.position = torch.tensor(position, dtype=torch.float, device=device)
self.target = torch.tensor(target, dtype=torch.float, device=device)
self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device)
self._device = device
def look_at(self, target):
"""
Sets the camera target
Parameters
----------
target : Tensor
the (3,) target tensor
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
self.target = target
return self
def look_from(self, position):
"""
Sets the camera position
Parameters
----------
position : Tensor
the (3,) position tensor
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
self.position = position
return self
def direction(self):
"""
Returns the camera direction
Returns
-------
Tensor
the (3,) direction tensor
"""
return self.target - self.position
def view_matrix(self):
"""
Returns the current view matrix
Returns
-------
Tensor
a (4,4,) view matrix
"""
z = normr(self.direction().unsqueeze(0))
x = normr(cross(self.up_vector.unsqueeze(0), z))
y = cross(z, x)
p = self.position.unsqueeze(0)
M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1),
torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)),
dim=0)
return M
def to(self, **kwargs):
"""
Changes the extrinsic dtype and/or device
Parameters
----------
kwargs : ...
Returns
-------
CameraExtrinsic
the extrinsic itself
"""
if 'device' in kwargs:
self._device = kwargs['device']
self.position = self.position.to(**kwargs)
self.target = self.target.to(**kwargs)
self.up_vector = self.up_vector.to(**kwargs)
return self
@property
def device(self):
return self._device
@device.setter
def device(self, value):
self._device = value
self.position = self.position.to(self.device)
self.target = self.target.to(self.device)
self.up_vector = self.up_vector.to(self.device)
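

# Minimal usage sketch (not part of the original module); CPU is used so the example
# also runs without a GPU.
def _camera_extrinsic_demo():
    cam = CameraExtrinsic(position=(0, 0, -5), target=(0, 0, 0), device='cpu')
    cam.look_at(torch.tensor([0.0, 1.0, 0.0]))  # aim the camera at a new target
    return cam.view_matrix()                    # (4, 4) view matrix tensor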
| 3.125 | 3 |
jupyter_d3/scatter_plot.py | nicolemoiseyev/jupyter-d3 | 1 | 12798995 | import uuid
from textwrap import dedent
from IPython.core.display import display, HTML
from string import Template
import numpy as np
# function to initialize a scatter plot
def init_chart(data,features):
chart_id = 'mychart-' + str(uuid.uuid4())
feature_types = {} # map each feature to type
num_feature_ranges = {}
for x in features:
if data[x].dtype in ["int64", "float64"]:
feature_domain = [min(data[x].dropna()), max(data[x].dropna())]
if feature_domain[1] == feature_domain[0]:
feature_types[x] = "categorical"
else:
feature_types[x] = data[x].dtype.name
num_feature_ranges[x] = feature_domain
else:
feature_types[x] = "categorical"
display(HTML('<script src="/static/components/requirejs/require.js"></script>'))
display(HTML(Template(dedent('''
<style>
body {
font: 11px sans-serif;
color: #2A3F5E
}
.chart {
background-color: #E5ECF6;
display: relative;
}
.axis path,
.axis line {
fill: none;
stroke: #2A3F5E;
shape-rendering: crispEdges;
}
.label {
color: #2A3F5E;
}
.selection {
margin-bottom: 20px;
}
.dot {
stroke: #fff;
opacity: 0.8;
}
.grid line {
stroke: #fff;
stroke-opacity: 0.7;
stroke-width: 2px;
shape-rendering: crispEdges;
}
.grid path {
stroke-width: 0;
}
.tooltip {
position: absolute;
font-size: 12px;
width: auto;
height: auto;
pointer-events: none;
background-color: white;
padding: 5px;
}
.legend {
background-color: white;
position: absolute;
left: 650px;
top: 20px;
width: auto;
height: 500px;
}
</style>
<script>
require.config({
paths: {
'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min',
}
})
// If we configure mychart via url, we can eliminate this define here
define($chart_id, ['d3'], function(d3) {
return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) {
var initialFeature = d3.select("#" + select_id).property("value")
var margin = {top: 40, right: 10, bottom: 50, left: 50},
width = 650 - margin.left - margin.right,
height = 400 - margin.top - margin.bottom;
// append the svg object to the body of the page
var svg = d3.select('#' + figure_id)
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
// X and Y scales and Axis
var x = d3.scaleLinear()
.domain(axes["x"])
.range([0, width]);
var y = d3.scaleLinear()
.domain(axes["y"])
.range([height, 0]);
// Add X-axis and label
svg
.append('g')
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(x))
svg.append("text")
.attr("class", "label")
.attr("x", width / 2)
.attr("y", height + 35)
.style("text-anchor", "end")
.text(xCat);
// Add Y-axis and label
svg
.append('g')
.call(d3.axisLeft(y));
svg.append("text")
.attr("class", "label")
.attr("x", -(height - 15)/ 2 )
.attr("y", -30)
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text(yCat);
// gridlines in x axis function
function make_x_gridlines() {
return d3.axisBottom(x)
.ticks(5)
}
// gridlines in y axis function
function make_y_gridlines() {
return d3.axisLeft(y)
.ticks(5)
}
// add grid lines
// add the X gridlines
svg.append("g")
.attr("class", "grid")
.attr("transform", "translate(0," + height + ")")
.call(make_x_gridlines()
.tickSize(-height)
.tickFormat("")
)
// add the Y gridlines
svg.append("g")
.attr("class", "grid")
.call(make_y_gridlines()
.tickSize(-width)
.tickFormat("")
)
// Add the datapoints
var dots = svg
.selectAll()
.data(data)
.enter()
.append("circle")
// Add the tooltip container to the body container
// it's invisible and its position/contents are defined during mouseover
var tooltip = d3.select("body").append("div")
.attr("class", "tooltip")
.style("opacity", 0);
// Add the legend container to the body container
var legend = d3.select("#" + legend_id).attr("y", 0);
// tooltip mouseover event handler
var tipMouseover = d => {
// x and y numeric labels
let html = xCat + ": " + Number((d[xCat]).toFixed(3)) + "<br>" + yCat + ": " + Number((d[yCat]).toFixed(3)) + "<br><br>"
// color feature label
html += colorFeature + ": " + d[colorFeature]
tooltip.html(html)
.style("left", (d3.event.pageX + 10) + "px")
.style("top", (d3.event.pageY - 15) + "px")
.transition()
.style("opacity", .9)
};
function updateLegendCat(featureColors) { // create the categorical legend
var legend = d3.select("#" + legend_id).html("") // clear current legend content
legend.append("text")
.attr("x", 15)
.attr("y", 10)
.text(colorFeature)
.attr("font-size", "14px")
let i = 0
Object.keys(featureColors).forEach(feature => {
legend.append("circle")
.attr("cx",20)
.attr("cy",30 + 20*i)
.attr("r", 4)
.style("fill", featureColors[feature])
legend.append("text")
.attr("x", 40)
.attr("y", 30 + 20*i )
.text(feature)
.style("font-size", "14px")
.attr("alignment-baseline","middle")
i += 1
})
}
function updateLegendNum(domain) { // create the continuous (numerical) legend
var legend = d3.select("#" + legend_id).html("")
var width = 30,
height = 300;
// add legend title
legend.append("text")
.attr("x", 15)
.attr("y", 10)
.text(colorFeature)
.attr("font-size", "14px")
var textHeight = 1;
var linearGradient = legend.append("defs")
.append("linearGradient")
.attr("id", "linear-gradient")
.attr("gradientTransform", "rotate(90)");
var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100])
for (let i = 0; i <= 100; i += 5)
linearGradient.append("stop")
.attr("offset", i + "%")
.attr("stop-color", color(100-i)); // to get the right orientation of gradient
const legendScale = num => {
var scale = d3.scaleLinear()
.domain([5, 0])
.range(domain)
return Number((scale(num))).toFixed(0)
}
legend.append("rect")
.attr("x", 20)
.attr("y", 30)
.attr("width", width)
.attr("height", height)
.style("fill", "url(#linear-gradient)");
for (let i = 0; i <= 5; i += 1) {
legend.append("text")
.attr("x", 55)
.attr("y", 30 + textHeight/2 + ((height-textHeight*6)/5)*i)
.text(legendScale(i))
.style("font-size", "14px")
.attr("alignment-baseline","middle");
}
}
// tooltip mouseout event handler
var tipMouseout = d => {
tooltip.transition()
.duration(0) // ms
.style("opacity", 0); // don't care about position!
};
var sizeScale = d3.scaleLinear()
.domain(sizeCat["range"])
.range([3,7])
dots.attr("class", "dot")
.attr("cx", d => x(d[xCat]) )
.attr("cy", d => y(d[yCat]) )
.attr("r", d => sizeScale(d[sizeCat["label"]]))
.on("mouseover", tipMouseover)
.on("mouseout", tipMouseout)
update(initialFeature)
// A function that update the chart with the new color coding scheme
function update(feature) {
colorFeature = feature
var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
var color;
let type = $feature_types[feature];
if (type === "categorical") {
color = d3.scaleOrdinal(colors);
let featureColors = {}
dots
.attr("fill", d => {
let dotColor = color(d[feature])
featureColors[d[feature]] = dotColor
return dotColor
})
updateLegendCat(featureColors) // update the legend with the new color map
} else {
let feature_domain = $num_feature_ranges[feature]
color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain)
dots
.attr("fill", d => {
let dotColor = color(d[feature])
return dotColor
})
updateLegendNum(feature_domain)
}
}
d3.select("#" + select_id).on("change", function(d) {
// recover the option that has been chosen
var selectedOption = d3.select(this).property("value")
// run the updateChart function with this selected option
update(selectedOption)
});
}
})
</script>
''')).substitute({ 'chart_id': repr(chart_id),
'feature_types': repr(feature_types),
'num_feature_ranges': repr(num_feature_ranges)})))
return chart_id
def scatter_plot(data,x_cat,y_cat,axes,features):
chart_id = init_chart(data,features)
features_html_options = "".join([ f"<option value ='{x}'>{x}</option>" for x in features ])
dict_data = data.replace(np.nan, "N/A").to_dict("records")
size_cat = {
"label": "n_reads",
"range": [min(data["n_reads"]), max(data["n_reads"])]
}
display(HTML(Template(dedent('''
<div class="selection">
<label for="colorFeature"
style="display: inline-block; width: 240px; text-align: right">
<span> Color by feature: </span>
</label>
<select id=$select_id>
$options
</select>
</div>
<div style="position: relative">
<svg id=$figure_id class='chart'></svg>
<div class="legend"><svg id=$legend_id height=500 width=400></svg></div>
</div>
<script>
require([$chart_id], function(mychart) {
mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes )
})
</script>
''')).substitute({
'chart_id': repr(chart_id),
'figure_id': repr('fig-' + str(uuid.uuid4())),
'legend_id': repr('leg-' + str(uuid.uuid4())),
'select_id': repr('sel-' + str(uuid.uuid4())),
'data': repr(dict_data),
'axes': repr(axes),
'x_cat': repr(x_cat),
'y_cat': repr(y_cat),
'size_cat': repr(size_cat),
        'options': features_html_options
})))
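

# Usage sketch (illustrative; the column names and ranges below are made up). Note
# that scatter_plot() currently sizes the dots by an 'n_reads' column, so the frame
# must contain one.
#
# import pandas as pd, numpy as np
# df = pd.DataFrame({
#     'x': np.random.rand(50), 'y': np.random.rand(50),
#     'n_reads': np.random.randint(1, 100, 50),
#     'cluster': np.random.choice(list('AB'), 50),
# })
# scatter_plot(df, 'x', 'y', axes={'x': [0, 1], 'y': [0, 1]},
#              features=['cluster', 'n_reads'])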
| 2.59375 | 3 |
methods/methods.py | ZhangShuang92/csslab | 7 | 12798996 | # -*- coding:utf-8 -*-
'''
Purpose: provide helpers for file operations and data processing.
Methods:
    # Feature #                                           - # Method #
    * read a large csv file in chunks                     - read_csv
    * list all sub-directories of a directory             - get_subdir
    * list all files of a given type in a directory       - get_files
    * list all files of a given type in a directory
      and its sub-directories                             - get_files_all
    * random sample of a DataFrame                        - random_dataframe_sample
    * probability density distribution                    - distribution_pdf
    * cumulative density distribution (CDF)               - distribution_cdf
    * frequency distribution                              - distribution_fre
    * normalize data to a given interval                  - normlize
Notes:
    * 2017.10.16 - dataframe_filter still needs revision
    * 2018.4.12  - revised and polished, oh yeah!
    * 2018.11.27 - hex2rgb and rgb2hex were moved to colorfly
'''
import os
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def read_csv(readpath,**kwargs):
'''
    Read a large csv file in chunks.
:param readpath: filepath
:return: pd.DataFrame
'''
print(' - - start to read - - %s'%readpath)
reader = pd.read_csv(readpath,iterator=True,**kwargs)
loop = True
chunkSize = 100000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
except StopIteration:
loop = False
data = pd.concat(chunks,ignore_index=True)
return data
def get_files(filedir, filetype=None, return_type='abspath'):
'''
    Return the names or absolute paths of all files of the given type in the directory.
    :param filedir: str, directory
    :param filetype: str, file extension (e.g. '.csv')
    :param return_type: 'name' for file names only, or 'abspath' for absolute paths
:return: list
'''
files = []
for filename in os.listdir(filedir):
if (filetype is None or
os.path.splitext(filename)[1] == filetype):
files.append(os.path.splitext(filename)[0])
if return_type == 'name':
return files
elif return_type == 'abspath':
files = [os.path.join(filedir, each + filetype) for each in files]
return files
return files
def get_files_all(filedir,filetype=None):
'''
返回目录和子目录下所以该类型的文件列表
:param filedir: str,目录
:param filetype: str,文件格式
:return: list
'''
files = []
for each in os.walk(filedir):
if len(each[-1]) >= 1:
for file_i in each[-1]:
                if (filetype is None
                        or os.path.splitext(file_i)[1] == filetype):
                    files.append(os.path.join(each[0], file_i))
return files
def get_subdir(sup_dir):
sub_dirs = []
for subdir in os.listdir(sup_dir):
abs_path = os.path.join(sup_dir,subdir)
if os.path.isdir(abs_path):
sub_dirs.append(abs_path)
return sub_dirs
def random_dataframe_sample(df, num):
'''
    Return a random sample of the DataFrame, without replacement.
    If a KeyError occurs, try changing df.ix in the code below to df.loc!
    :param df: DataFrame, the data
    :param num: sample size, or a fraction such as 0.2
:return: Dataframe
'''
length = len(df)
if num < 1:
num = int(length * num)
inds = list(df.index)
if num <= length:
ind_sample = random.sample(inds, num)
df_sample = df.loc[ind_sample, :]
else:
df_sample = df
return df_sample
def distribution_fre(data):
'''
计算数据的频率密度分布,最后的概率值加起来都等于1
:param data: list 或者 pandas.Series.
:return: pandas.Series
'''
if data is None:
return None
if not isinstance(data, pd.Series):
data = pd.Series(data)
data_count = data.value_counts().sort_index()
data_p = data_count / data_count.sum()
return data_p
def distribution_pdf(data, bins=None):
'''
    Estimate the probability density function with a density histogram.
    :param data: the data
    :return: data_pdf, pandas.Series
'''
if data is None:
return None
if bins is None:
bins = 200
if isinstance(data,pd.Series):
data = data.values
density, xdata = np.histogram(data, bins=bins, density=True)
xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0
data_pdf = pd.Series(density, index=xdata)
return data_pdf
def distribution_cdf(data, bins=None):
pdf = distribution_pdf(data, bins)
cdf = []
for ind in pdf.index:
cdf.append(pdf[:ind].sum())
cdf = pd.Series(cdf, index=pdf.index)
cdf = cdf / cdf.max()
return cdf
def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True):
'''
    :param data: Series data
    :param subplot: number of panels to draw: raw, log-log and semi-log
    :param data_norm: whether to normalize the data first, e.g. normalized degree
    :param cmp: whether to plot the cumulative distribution (CCDF) instead of the PDF
    :param grid: whether to show grid lines
:return: None
'''
if data_norm:
data_normed = normlize(data.values,0,1)
name = 'Normlized'+ str(data.name)
data = pd.Series(data_normed,name=name)
ylabel = 'Prob'
if cmp:
data = distribution_cdf(data)
ylabel = 'CCDF'
else:
data = distribution_pdf(data)
fg = plt.figure()
axes = []
for i in range(subplot):
axes.append(fg.add_subplot(1,subplot,i+1))
data.plot(ax=axes[0], style='*-')
axes[0].set_title('Distribution')
if subplot>=2:
data.plot(ax=axes[1], style='*', logy=True, logx=True)
axes[1].set_title('log-log')
#axes[1].set_xlim([0, 50])
if subplot>=3:
data.plot(ax=axes[2], style='*-', logy=True)
axes[2].set_title('semi-log')
for i in range(subplot):
axes[i].set_ylabel(ylabel)
axes[i].set_xlabel(data.name)
axes[i].set_xlim([0, max(data.index)*1.1])
axes[i].grid(grid, alpha=0.8)
return axes
def normlize(data,lower=0,upper=1):
'''
    Normalize data to a given interval.
    :param data: list, array, ndarray, etc.
    :param lower: lower bound of the target interval
    :param upper: upper bound of the target interval
    :return: the normalized data
'''
xmax = np.max(data)
xmin = np.min(data)
data_new = (upper - lower) * (data - xmin) / (xmax - xmin) + lower
return data_new
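

if __name__ == '__main__':
    # Small usage sketch (not part of the original module); random data only.
    demo = pd.Series(np.random.randn(1000))
    print(distribution_pdf(demo, bins=50).head())
    print(normlize([1, 2, 3, 4], lower=0, upper=10))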
| 2.765625 | 3 |
yardstick/benchmark/scenarios/lib/create_image.py | upfront710/yardstick | 28 | 12798997 | ##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
from yardstick.benchmark.scenarios import base
from yardstick.common import openstack_utils
from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
class CreateImage(base.Scenario):
"""Create an OpenStack image"""
__scenario_type__ = "CreateImage"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.options = self.scenario_cfg["options"]
self.name = self.options["image_name"]
self.file_name = self.options.get("file_name")
self.container = self.options.get("container", 'images')
self.md5 = self.options.get("md5")
self.sha256 = self.options.get("sha256")
self.disk_format = self.options.get("disk_format")
self.container_format = self.options.get("container_format",)
self.disable_vendor_agent = self.options.get("disable_vendor_agent", True)
self.wait = self.options.get("wait", True)
self.timeout = self.options.get("timeout", 3600)
self.allow_duplicates = self.options.get("allow_duplicates", False)
self.meta = self.options.get("meta")
self.volume = self.options.get("volume")
self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
def setup(self):
"""scenario setup"""
self.setup_done = True
def run(self, result):
"""execute the test"""
if not self.setup_done:
self.setup()
image_id = openstack_utils.create_image(
self.shade_client, self.name, filename=self.file_name,
container=self.container, md5=self.md5, sha256=self.sha256,
disk_format=self.disk_format,
container_format=self.container_format,
disable_vendor_agent=self.disable_vendor_agent, wait=self.wait,
timeout=self.timeout, allow_duplicates=self.allow_duplicates,
meta=self.meta, volume=self.volume)
if not image_id:
result.update({"image_create": 0})
LOG.error("Create image failed!")
raise exceptions.ScenarioCreateImageError
result.update({"image_create": 1})
LOG.info("Create image successful!")
keys = self.scenario_cfg.get("output", '').split()
values = [image_id]
return self._push_to_outputs(keys, values)
| 1.859375 | 2 |
tests/blueprints/test_network.py | tssujt/remote | 67 | 12798998 | <reponame>tssujt/remote
from unittest.mock import patch
import pyipip
from dns.exception import DNSException
from remote.blueprints.network import IPRecord
from tests.test_app import TestBase
class TestNetwork(TestBase):
_test_domain = "example.com"
_test_ip = "127.0.0.1"
def test_ip(self):
response = self.client.get("/ip")
self.assertEqual(response.status_code, 200)
_json_body = response.json
self.assertIn("success", _json_body)
self.assertEqual(_json_body["ip"], response.request.remote_addr)
self.assertIn("ipv4", _json_body)
self.assertIn("ipv6_available", _json_body)
def test_ipip(self):
ipdb_func = "remote.blueprints.network._get_ipdb"
mock_func = "remote.blueprints.network.pyipip.IPIPDatabase.lookup"
fake_ip_fields = "\t".join(map(str, range(len(IPRecord._fields) - 2)))
with (
patch(ipdb_func, return_value=pyipip.IPIPDatabase),
patch(mock_func, return_value=fake_ip_fields),
):
response = self.client.get(f"/ipip/{self._test_ip}")
self.assertEqual(response.status_code, 200)
_json_body = response.json
self.assertIn("success", _json_body)
self.assertIn("continent_code", _json_body)
self.assertIn("cn_division_code", _json_body)
self.assertEqual(set(_json_body.keys()), set(IPRecord._fields))
def test_ipip_error_1(self):
response = self.client.get("/ipip/FF:FF")
self.assertEqual(response.status_code, 400)
_json_body = response.json
self.assertIn("IPv6", _json_body["message"])
self.assertEqual(_json_body["status"], "error")
self.assertFalse(_json_body["success"])
def test_ipip_error_2(self):
response = self.client.get("/ipip/FAKE_IP")
self.assertEqual(response.status_code, 400)
_json_body = response.json
self.assertIn("Invalid", _json_body["message"])
self.assertEqual(_json_body["status"], "error")
self.assertFalse(_json_body["success"])
def test_ipip_error_3(self):
ipdb_func = "remote.blueprints.network._get_ipdb"
with patch(ipdb_func, side_effect=Exception("Mock Exception")):
response = self.client.get(f"/ipip/{self._test_ip}")
self.assertEqual(response.status_code, 404)
_json_body = response.json
self.assertIn("not found", _json_body["message"])
self.assertEqual(_json_body["status"], "error")
self.assertFalse(_json_body["success"])
def test_resolve(self):
response = self.client.get(f"/dns/resolve?domain={self._test_domain}")
self.assertEqual(response.status_code, 200)
_json_body = response.json
self.assertIn("nameservers", _json_body)
self.assertIn("ttl", _json_body)
self.assertIsInstance(_json_body["answers"], list)
def test_resolve_error_1(self):
response = self.client.get("/dns/resolve")
self.assertEqual(response.status_code, 400)
_json_body = response.json
self.assertEqual("error", _json_body["status"])
self.assertFalse(_json_body["success"])
self.assertIn("missing", _json_body["message"])
def test_resolve_error_2(self):
mock_func = "remote.blueprints.network.dns.resolver.Resolver.resolve"
with patch(mock_func, side_effect=DNSException("Mock Exception")):
response = self.client.get(f"/dns/resolve?domain={self._test_domain}")
self.assertEqual(response.status_code, 400)
_json_body = response.json
self.assertEqual("error", _json_body["status"])
self.assertFalse(_json_body["success"])
self.assertIn("Unable", _json_body["message"])
| 2.546875 | 3 |
api/Mongo/abc_dbops.py | AutoCoinDCF/NEW_API | 0 | 12798999 | <filename>api/Mongo/abc_dbops.py<gh_stars>0
'''AbstractDbOps: abstract base class for database operation interfaces
@author: <EMAIL>
@last_modified: 2019.6.20
'''
from abc import ABC, abstractmethod
class AbstractDbOps(ABC):
@abstractmethod
def __init__(self):
        '''Initialize.
        e.g. initialize the db instance
'''
raise NotImplementedError
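if __name__ == '__main__':
    # Illustrative sketch (added): shows how a concrete backend would subclass the
    # interface above. DummyDbOps is a made-up placeholder, not part of the project.
    class DummyDbOps(AbstractDbOps):
        def __init__(self):
            self.connected = True
    print(DummyDbOps().connected)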
| 2.234375 | 2 |
card.py | akaeme/BlackJackBot | 0 | 12799000 | #encoding: utf8
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = "GPL"
__version__ = "0.1"
import random
class Card(object):
suit_names = ["♠️", "♣️", "♦️", "♥️"]
rank_names = [None, "Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King"]
def __init__(self, suit=0, rank=1):
self.suit = suit
self.rank = rank
def __str__(self):
"""Returns a human-readable string representation."""
return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit])
def __repr__(self):
return self.__str__()
def value(self):
return self.rank if self.rank < 10 else 10
def is_ace(self):
if self.rank == 1:
return True
return False
def is_ten(self):
if self.rank >= 10:
return True
return False
def value(hand):
v = sum([c.value() for c in hand])
if len([c for c in hand if c.is_ace()]) > 0 and v <= 11: #if there is an Ace and we don't bust by take the Ace as an eleven
return v+10
return v
def blackjack(hand):
if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten():
return True
if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten():
return True
return False
if __name__ == '__main__':
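    # Illustrative example (added): exercises the Card helpers defined in this module.
    hand = [Card(3, 1), Card(0, 13)]  # Ace of hearts, King of spades
    print(hand, value(hand), blackjack(hand))
    # NOTE: Shoe below is not defined in this module; it is assumed to come from a
    # separate deck/shoe module, so the following lines only run in that context.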
shoe = Shoe()
shoe.shuffle()
print(shoe.deal_cards(2))
| 3.890625 | 4 |
Aula04/Exemplo_Arquivos.py | MateusPeschke/CursoPython | 0 | 12799001 | <reponame>MateusPeschke/CursoPython
# "r" read - abre o arquivo
# "w" write - cria ou substitui um arquivo
# "a" append - adiciona novas informações
import matplotlib.pyplot as plt
import numpy as np
arquivo = open(r"C:\Users\67184\Documents\Desenvolvimento_Agil_em_Python_1_2020\aula4\exemplos\cadastro.txt","r")
lista_cadastro = []
for pessoa in arquivo:
pessoa = pessoa.strip()
pessoa = pessoa.split(';')
lista_cadastro.append(pessoa)
arquivo.close()
# for pessoa in lista_cadastro:
# if pessoa[0].upper() == '300':
# print(pessoa)
# break
# for pessoa in lista_cadastro:
# if pessoa[3].upper() == 'F':
# print(pessoa)
# # break
# for pessoa in lista_cadastro:
# if not ('0' in pessoa[0]) and not ('A' in pessoa[1]):
# print(pessoa)
# # break
mulher = []
contador_mulher = 0
for pessoa in lista_cadastro:
if pessoa[3] == 'f':
mulher.append(pessoa)
contador_mulher = contador_mulher + 1
# break
arquivo = open(r'C:\Users\67184\Documents\Desenvolvimento_Agil_em_Python_1_2020\aula4\exemplos\mulher.txt','w')
for pessoa in mulher:
pessoa_str = ';'.join(pessoa)+"\n"
arquivo.write(pessoa_str)
arquivo.close()
homens = []
contador_homens = 0
for pessoa in lista_cadastro:
if pessoa[3] == 'm':
homens.append(pessoa)
contador_homens = contador_homens + 1
# break
arquivo = open(r'C:\Users\67184\Documents\Desenvolvimento_Agil_em_Python_1_2020\aula4\exemplos\homens.txt','w')
for pessoa in homens:
pessoa_str = ';'.join(pessoa)+"\n"
arquivo.write(pessoa_str)
arquivo.close()
maior_idade = []
contador_maior = 0
for pessoa in lista_cadastro:
if int(pessoa[2]) >= 18:
maior_idade.append(pessoa)
contador_maior = contador_maior + 1
# break
arquivo = open(r'C:\Users\67184\Documents\Desenvolvimento_Agil_em_Python_1_2020\aula4\exemplos\maior_idade.txt','w')
for pessoa in maior_idade:
pessoa_str = ';'.join(pessoa)+"\n"
arquivo.write(pessoa_str)
arquivo.close()
menor_idade = []
contador_menor = 0
for pessoa in lista_cadastro:
if int(pessoa[2]) < 18:
menor_idade.append(pessoa)
contador_menor = contador_menor + 1
# break
arquivo = open(r'C:\Users\67184\Documents\Desenvolvimento_Agil_em_Python_1_2020\aula4\exemplos\menor_idade.txt','w')
for pessoa in menor_idade:
pessoa_str = ';'.join(pessoa)+"\n"
arquivo.write(pessoa_str)
arquivo.close()
x = np.linspace(0,2 * np.pi,400)
y = np.sin(x ** 2)
genero = ("Homens", "Mulheres")
quantidade_genero = (contador_homens,contador_mulher)
Idade = ('maior','menor')
quantidade_idade = (contador_maior,contador_menor)
plt.subplot(2,1,1)
plt.bar(genero,quantidade_genero)
plt.subplot(2,1,2)
plt.bar(Idade,quantidade_idade)
plt.show() | 3.296875 | 3 |
models.py | foamliu/i-Cloud | 1 | 12799002 | <filename>models.py
import math
import torch
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import Parameter
from config import device, num_classes
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBlock(nn.Module):
def __init__(self, channel, reduction=16):
super(SEBlock, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.PReLU(),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class IRBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
super(IRBlock, self).__init__()
self.bn0 = nn.BatchNorm2d(inplanes)
self.conv1 = conv3x3(inplanes, inplanes)
self.bn1 = nn.BatchNorm2d(inplanes)
self.prelu = nn.PReLU()
self.conv2 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.use_se = use_se
if self.use_se:
self.se = SEBlock(planes)
def forward(self, x):
residual = x
out = self.bn0(x)
out = self.conv1(out)
out = self.bn1(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.use_se:
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.prelu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, use_se=True, im_size=112):
self.inplanes = 64
self.use_se = use_se
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.prelu = nn.PReLU()
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.bn2 = nn.BatchNorm2d(512)
self.dropout = nn.Dropout()
if im_size == 112:
self.fc = nn.Linear(512 * 7 * 7, 512)
else: # 224
self.fc = nn.Linear(512 * 14 * 14, 512)
self.bn3 = nn.BatchNorm1d(512)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
self.inplanes = planes
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, use_se=self.use_se))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.bn3(x)
return x
def resnet18(args, **kwargs):
model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(args, **kwargs):
model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
class ArcMarginModel(nn.Module):
def __init__(self, args):
super(ArcMarginModel, self).__init__()
self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))
nn.init.xavier_uniform_(self.weight)
self.easy_margin = args.easy_margin
self.m = args.margin_m
self.s = args.margin_s
self.cos_m = math.cos(self.m)
self.sin_m = math.sin(self.m)
self.th = math.cos(math.pi - self.m)
self.mm = math.sin(math.pi - self.m) * self.m
def forward(self, input, label):
x = F.normalize(input)
W = F.normalize(self.weight)
cosine = F.linear(x, W)
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
one_hot = torch.zeros(cosine.size(), device=device)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output
from torchvision import models
class FrameDetectionModel(nn.Module):
def __init__(self):
super(FrameDetectionModel, self).__init__()
resnet = models.resnet50(pretrained=True)
# Remove linear layer (since we're not doing classification)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.fc = nn.Linear(2048, 8)
self.sigmoid = nn.Sigmoid()
def forward(self, images):
x = self.resnet(images) # [N, 2048, 1, 1]
x = x.view(-1, 2048) # [N, 2048]
x = self.fc(x)
x = self.sigmoid(x) # [N, 8]
return x
class FaceAttributeModel(nn.Module):
def __init__(self):
super(FaceAttributeModel, self).__init__()
resnet = models.resnet50(pretrained=True)
# Remove linear and pool layers (since we're not doing classification)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.fc = nn.Linear(2048, 17)
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=-1)
def forward(self, images):
x = self.resnet(images) # [N, 2048, 1, 1]
x = x.view(-1, 2048) # [N, 2048]
x = self.fc(x)
        reg = self.sigmoid(x[:, :5])  # [N, 5]
expression = self.softmax(x[:, 5:8])
gender = self.softmax(x[:, 8:10])
glasses = self.softmax(x[:, 10:13])
race = self.softmax(x[:, 13:17])
return reg, expression, gender, glasses, race
class FaceExpressionModel(nn.Module):
def __init__(self):
super(FaceExpressionModel, self).__init__()
resnet = models.resnet101(pretrained=True)
# Remove linear and pool layers (since we're not doing classification)
modules = list(resnet.children())[:-2]
self.resnet = nn.Sequential(*modules)
self.dropout = nn.Dropout()
self.avgpool = nn.AvgPool2d(kernel_size=4)
self.fc = nn.Linear(2048, 7)
self.softmax = nn.Softmax(dim=-1)
def forward(self, images):
x = self.resnet(images) # [N, 2048, 1, 1]
x = self.dropout(x)
x = self.avgpool(x)
x = x.view(-1, 2048) # [N, 2048]
x = self.fc(x)
out = self.softmax(x)
return out
class EastModel(nn.Module):
def __init__(self, args):
super(EastModel, self).__init__()
if args.network == 'r18':
self.resnet = resnet18(args)
elif args.network == 'r34':
self.resnet = resnet34(args)
elif args.network == 'r50':
self.resnet = resnet50(args)
elif args.network == 'r101':
self.resnet = resnet101(args)
else: # args.network == 'r152':
self.resnet = resnet152(args)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1)
self.bn3 = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1)
self.bn5 = nn.BatchNorm2d(64)
self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)
self.bn6 = nn.BatchNorm2d(32)
self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1)
self.bn7 = nn.BatchNorm2d(32)
self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1)
self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1)
self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1)
self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
def forward(self, images):
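        # NOTE (added comment): this line assumes a backbone that returns
        # (embedding, feature_maps); the ResNet defined earlier in this file returns
        # a single tensor, so a variant exposing intermediate maps f[0..3] is expected.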
_, f = self.resnet(images)
h = f[3] # bs 2048 w/32 h/32
g = (self.unpool1(h)) # bs 2048 w/16 h/16
c = self.conv1(torch.cat((g, f[2]), 1))
c = self.bn1(c)
c = self.relu(c)
h = self.conv2(c) # bs 128 w/16 h/16
h = self.bn2(h)
h = self.relu(h)
g = self.unpool2(h) # bs 128 w/8 h/8
c = self.conv3(torch.cat((g, f[1]), 1))
c = self.bn3(c)
c = self.relu(c)
h = self.conv4(c) # bs 64 w/8 h/8
h = self.bn4(h)
h = self.relu(h)
g = self.unpool3(h) # bs 64 w/4 h/4
c = self.conv5(torch.cat((g, f[0]), 1))
c = self.bn5(c)
c = self.relu(c)
h = self.conv6(c) # bs 32 w/4 h/4
h = self.bn6(h)
h = self.relu(h)
g = self.conv7(h) # bs 32 w/4 h/4
g = self.bn7(g)
g = self.relu(g)
score = self.conv8(g) # bs 1 w/4 h/4
score = self.sigmoid(score)
geo_map = self.conv9(g)
geo_map = self.sigmoid(geo_map) * 512
angle_map = self.conv10(g)
angle_map = self.sigmoid(angle_map)
angle_map = (angle_map - 0.5) * math.pi / 2
geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4 w/4
return score, geo
| 2.484375 | 2 |
src/test/data_pipe_test/basic_index_test.py | random-python/data_pipe | 14 | 12799003 | """
"""
from data_pipe.basic_index import *
from data_pipe_test.verify_index import *
def test_log2():
print()
assert integer_log2(1) == 0
assert integer_log2(2) == 1
assert integer_log2(4) == 2
assert integer_log2(8) == 3
def test_index_store():
index_store = BasicIndex()
verify_index(index_store)
| 1.984375 | 2 |
mopidy_radioworld/radioworld.py | anabolyc/Mopidy-RadioWorld | 0 | 12799004 | <filename>mopidy_radioworld/radioworld.py
import requests
from time import sleep
from contextlib import closing
import logging
logger = logging.getLogger(__name__)
class RadioWorld(object):
def __init__(self):
self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s'
#self._base_uri = 'http://localhost:5000/%s'
self._session = requests.Session()
def root(self):
results = [{
'type': 'directory',
'schema': 'rnd',
'id': None,
'text': 'Feeling lucky'
}]
countries = self._query('countries')
if countries is not None:
for location in sorted(countries, key=lambda loc: loc['name']):
results.append({
'type': 'directory',
'schema': 'location',
'id': location['id'],
'text': location['name']
})
return results
def rnd(self):
station = self._query('station/rnd')
if station is None:
return []
return {
'type': 'track',
'schema': 'station',
'id': station['id'],
'text': station['text']
}
def stations(self, id):
stations = self._query('location/{}/stations'.format(id))
if stations is None or stations == []:
logger.warning("empty response from API")
return []
results = []
for station in sorted(stations, key=lambda sta: sta['text']):
results.append({
'type': 'track',
'schema': 'station',
'id': station['id'],
'text': station['text']
})
return results
def station(self, id):
station = self._query('station/%s' % id)
if station is None:
logger.warning("empty response from API")
return station
def image(self, id):
return self._base_uri % ("station/{}/image".format(id))
def search(self, q, location_id):
results = []
search = self._query("stations/search/{}".format(q)) if location_id is None else self._query("location/{}/search/{}".format(location_id, q))
stations = search['stations']
for station in stations:
results.append({
'type': 'track',
'schema': 'station',
'id': station['id'],
'text': station['text']
})
return results
def _query(self, path):
uri = (self._base_uri % path)
logger.info('RadioWorld request: %s', uri)
try:
while True:
with closing(self._session.get(uri)) as r:
r.raise_for_status()
logger.debug("RadioWorld response: %s", r.status_code)
if r.status_code != 204:
return r.json()
else:
sleep(0.25)
except Exception as e:
logger.info('RadioWorld API request for %s failed: %s' % (path, e))
return None
| 2.59375 | 3 |
pingsweeper.py | allanvictor/pingsweeper | 0 | 12799005 | <filename>pingsweeper.py
#!/usr/bin/python
import logging
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from scapy.all import *
from netaddr import *
import socket
import sys, getopt
def getlive():
for i in live:
print('live {0}'.format(i[0].dst))
def getdead():
for j in dead:
print('dead {0}'.format(j[0].dst))
def usage():
print('Usage: pingsweeper [options] <ip>\n\t-l show live hosts\n\t-d show dead hosts\n\n--Development By <NAME>--')
exit()
if(len(sys.argv)>1):
opts, args = getopt.getopt(sys.argv[1:],'ldh')
try:
target=args[0]
except:
usage()
try:
ip_range=IPNetwork(target)
for ip in ip_range:
if str(ip_range.cidr) != str(ip)+'/32':
if ip == ip_range.network or ip == ip_range.broadcast:
continue
live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0)
if not opts:
getlive()
getdead()
else:
for opt in opts:
if ('-l' in opt):
getlive()
elif ('-d' in opt):
getdead()
                elif ('-h' in opt):
usage()
except:
logging.getLogger(__name__).warning('ERROR:Illegal IP')
usage()
else:
usage()
| 2.53125 | 3 |
build.py | Lzhiyong/android-sdk-tools | 22 | 12799006 | #!/usr/bin/env python
#
# Copyright © 2022 Github Lzhiyong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=not-callable, line-too-long, no-else-return
import time
import argparse
import subprocess
from pathlib import Path
def format_time(seconds):
minute, sec = divmod(seconds, 60)
hour, minute = divmod(minute, 60)
if hour > 0:
return "{}h{:02d}m{:02d.2f}s".format(hour, minute, sec)
elif minute > 0:
return "{}m{:02d.2f}s".format(minute, sec)
else:
return "{:.2f}s".format(sec)
def build(cc, cxx, args):
command = ["cmake", "-GNinja",
"-B {}".format(args.build),
"-DCMAKE_C_COMPILER={}".format(cc),
"-DCMAKE_CXX_COMPILER={}".format(cxx),
"-DTARGET_ABI={}".format(args.arch),
"-DCMAKE_BUILD_TYPE=Release"]
if args.protoc is not None and len(str(args.protoc)) > 0:
command.append("-DPROTOC_PATH={}".format(args.protoc))
result = subprocess.run(command)
start = time.time()
if result.returncode == 0:
if args.target == "all":
result = subprocess.run(["ninja", "-C", args.build, "-j {}".format(args.job)])
else:
result = subprocess.run(["ninja", "-C", args.build, args.target, "-j {}".format(args.job)])
if result.returncode == 0:
end = time.time()
print("\033[1;32mbuild success cost time: {}\033[0m".format(format_time(end - start)))
def configure(args):
ndk = Path(args.ndk)
if not ndk.exists() or not ndk.is_dir():
raise ValueError("cannot find the ndk")
toolchain = ndk / "toolchains/llvm/prebuilt/linux-x86_64"
cc: Path = Path()
cxx: Path = Path()
if args.arch == "aarch64":
cc = toolchain / "bin" / "aarch64-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "aarch64-linux-android{}-clang++".format(args.api)
elif args.arch == "arm":
cc = toolchain / "bin" / "armv7a-linux-androideabi{}-clang".format(args.api)
cxx = toolchain / "bin" / "armv7a-linux-androideabi{}-clang++".format(args.api)
elif args.arch == "x86":
cc = toolchain / "bin" / "i686-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "i686-linux-android{}-clang++".format(args.api)
else:
cc = toolchain / "bin" / "x86_64-linux-android{}-clang".format(args.api)
cxx = toolchain / "bin" / "x86_64-linux-android{}-clang++".format(args.api)
if not cc.exists() or not cxx.exists():
print("cc is {}".format(cc))
print("cxx is {}".format(cxx))
raise ValueError("error: cannot find the clang compiler")
# start building
build(str(cc), str(cxx), args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--ndk", required=True, help="set the ndk toolchain path")
parser.add_argument("--arch", choices=["aarch64", "arm", "x86", "x86_64"],
required=True, help="build for the specified architecture")
parser.add_argument("--api", default=30, help="set android platform level, min api is 30")
parser.add_argument("--build", default="build", help="the build directory")
parser.add_argument("--job", default=16, help="run N jobs in parallel, default is 16")
parser.add_argument("--target", default="all", help="build specified targets such as aapt2 adb fastboot, etc")
parser.add_argument("--protoc", help="set the host protoc path")
args = parser.parse_args()
configure(args)
if __name__ == "__main__":
main()
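# Illustrative invocation (added; the NDK path below is a placeholder):
#   ./build.py --ndk ~/android-ndk-r25c --arch aarch64 --api 30 --target aapt2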
| 2.234375 | 2 |
airflow_ml_dags/dags/model_train.py | made-ml-in-prod-2021/liliyamakhmutova- | 0 | 12799007 | import pathlib
from datetime import timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
import json
from airflow.utils.dates import days_ago
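# NOTE (added): "{{ ds }}" is only rendered by Airflow in templated operator fields;
# inside these python_callable bodies the literal string "{{ ds }}" ends up in the
# paths, so per-date directories would need the execution date passed explicitly
# (e.g. via op_kwargs or the task context).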
def _preprocess_data():
data_df = pd.read_csv("/opt/airflow/data/raw/{{ ds }}/data.csv")
target_df = pd.read_csv("/opt/airflow/data/raw/{{ ds }}/target.csv")
print(f"data before transform: {data_df}")
data_df.drop(columns=["fbs"], inplace=True)
data_df["target"] = target_df
print(f"data after transform: {data_df}")
pathlib.Path("/opt/airflow/data/processed/{{ ds }}").mkdir(parents=True, exist_ok=True)
processed_path = "/opt/airflow/data/processed/{{ ds }}/data.csv"
print(f"saving processed data to {processed_path}")
data_df.to_csv(processed_path, index=False)
def _train_val_split():
data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/data.csv")
train_data, test_data = train_test_split(data, train_size=0.8)
train_data.to_csv("/opt/airflow/data/processed/{{ ds }}/train.csv", index=False)
test_data.to_csv("/opt/airflow/data/processed/{{ ds }}/test.csv", index=False)
def _train_model():
train_data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/train.csv")
target = train_data["target"]
train_data.drop(columns=["target"], inplace=True)
transformer = ColumnTransformer(
[
(
'num',
Pipeline([('scaler', StandardScaler())]),
["age", "trestbps", "chol", "thalach", "oldpeak"],
),
(
'cat',
Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]),
["sex", "cp", "restecg", "exang", "slope", "ca", "thal"],
),
]
)
transformer.fit_transform(train_data)
model = LogisticRegression()
model.fit(train_data, target)
pathlib.Path("/opt/airflow/data/models/{{ ds }}").mkdir(parents=True, exist_ok=True)
with open("/opt/airflow/data/models/{{ ds }}/model.pkl", "wb") as f:
pickle.dump(model, f)
with open("/opt/airflow/data/models/{{ ds }}/transformer.pkl", "wb") as f:
pickle.dump(transformer, f)
def _test_model():
test_data = pd.read_csv("/opt/airflow/data/processed/{{ ds }}/test.csv")
target = test_data["target"]
test_data.drop(columns=["target"], inplace=True)
model = pickle.load(open("/opt/airflow/data/models/{{ ds }}/model.pkl", "rb"))
transformer = pickle.load(open("/opt/airflow/data/models/{{ ds }}/transformer.pkl", "rb"))
transformer.transform(test_data)
predicts = model.predict(test_data)
metrics = {
"accuracy": accuracy_score(target, predicts),
"f1": f1_score(target, predicts),
"roc_auc": roc_auc_score(target, predicts),
}
pathlib.Path("/opt/airflow/data/metrics/{{ ds }}").mkdir(parents=True, exist_ok=True)
with open("/opt/airflow/data/metrics/{{ ds }}/metrics.json", "w") as metric_file:
json.dump(metrics, metric_file)
with DAG(
dag_id="model_train",
description="This DAG trains model on synthetic data",
start_date=days_ago(0),
schedule_interval=timedelta(days=1),
) as dag:
preprocess_data = PythonOperator(
task_id="data_preprocessing",
python_callable=_preprocess_data,
dag=dag,
)
train_val_split = PythonOperator(
task_id="split_data",
python_callable=_train_val_split,
dag=dag
)
train_model = PythonOperator(
task_id="train_model",
python_callable=_train_model,
dag=dag
)
test_model = PythonOperator(
task_id="test_model",
python_callable=_test_model,
dag=dag
)
preprocess_data >> train_val_split >> train_model >> test_model
| 2.5 | 2 |
core/utils.py | mannyfin/aioget | 1 | 12799008 | <gh_stars>1-10
import os
from pathlib import Path
import shutil
import json
import asyncio
import concurrent.futures
from functools import partial
from datetime import datetime, timedelta
from typing import Union, List, Coroutine, Callable, Tuple, Optional
from multiprocessing import cpu_count
import pickle
from deprecated.sphinx import deprecated
from configs.base import consts
from core import logger
basiclogger = logger.rabbit_logger(__name__)
def pop_arb_field_if_exists(msg: dict) -> Tuple[dict, dict]:
"""values in the arb field exepected to be a dictionary."""
if 'arb' in msg:
arb = msg.pop('arb')
return arb, msg
return {}, msg
def set_arb(msg: dict, arb: dict) -> dict:
if arb:
msg['arb'] = arb
return msg
def load_config(path: str) -> dict:
"""
Provide path to load the JSON config
Args:
path: str, should be path to JSON file
Returns:
Any JSON-serializable data. Usually a dict for the config files.
"""
with open(path, 'r') as f:
config = json.load(f)
return config
def make_config(paths):
configs = {}
for fp in paths:
business_driver = Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1]
if business_driver not in configs:
configs[business_driver] = {}
newconfig = load_config(fp)
for key, val in newconfig.items():
configs[business_driver][key] = val
return configs
def load_model(path: str, mode: str = 'rb', response_encoding=None):
with open(path, mode) as f:
model = pickle.load(f)
return model
def merge_configs(driver: dict, client: dict) -> dict:
"""
Merge Driver and Client config. The Client configs will overwrite matching keys in the Driver config.
Args:
driver (dict): driver dictionary of configs
client (dict): client dictionary of configs
Returns:
Merged configs (dict)
"""
return {**driver, **client}
# def process_pool(workers: int,
# func: Callable,
# iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]:
# """
# Pass an iterable to a process pool and return a list of asyncio futures.
#
# Args:
# workers: Number of workers in the Process Pool
# func: function
# iterable: unique values you will pass to each process
# args: additional values passed to every process
# kwargs: additional values passed to every process
#
# Returns:
# List of asyncio.Futures
#
# Examples:
#
# .. code-block:: python
# :linenos:
#
# def cpu_bound_func(a, b=b):
# # CPU-bound operations will block the event loop:
# # in general it is preferable to run them in a
# # process pool. Simulating this. with arg and kwarg.
# time.sleep(1)
# return a**2, b*-1
#
# def async_process_pool(workers: int, func: Callable, iterable, *args, **kwargs) -> list:
# if workers <= 0:
# workers = cpu_count()
# loop = asyncio.get_running_loop()
# with concurrent.futures.ProcessPoolExecutor(workers) as pool:
# return [loop.run_in_executor(pool, partial(func, _ , *args, **kwargs)) for _ in iterable]
#
# # submitting futures to the process pool and getting results as completed. Not necessarily in order.
# async def exhaust_async_process_pool():
# for _ in asyncio.as_completed(async_process_pool(0, cpu_bound_func, list(range(8)), b=2)):
# result = await _
# print(result)
#
# start = time.time()
# asyncio.run(exhaust_async_process_pool())
# end = time.time() - start
# print(end) # should take a littler longer than math.ceil(8/workers) due to process overhead.
#
#
# Output:
#
# (1, -2)
# (0, -2)
# (9, -2)
# (4, -2)
# (16, -2)
# (25, -2)
# (36, -2)
#
# .. todo:: make this work with async queues correctly...
# """
# if workers <= 0:
# workers = cpu_count()
# loop = asyncio.get_running_loop()
# with concurrent.futures.ProcessPoolExecutor(workers) as pool:
# if isinstance(iterable, (list, tuple)):
# return [loop.run_in_executor(pool, partial(func, **value)) for value in iterable]
# elif isinstance(iterable, asyncio.Queue):
# # todo make this work
# futures = []
# for ctr in range(iterable.qsize()):
# value = iterable.get_nowait()
# futures.append(loop.run_in_executor(pool, partial(func, **value)))
# iterable.task_done()
# return futures
async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue,
func: Optional[Callable] = None):
"""
    Parses the response html in a concurrent.futures.ProcessPoolExecutor process pool. This function checks if
    next_queue is empty. If it is not, then it empties it by getting each item in next_queue and passing to the
    process pool and returning a future. The future is then put on the write_queue.
If queue's requests are completed and next_queue has completed (i.e. no unfinished tasks in either queue),
then break.
.. todo:: this could be refactored by async_queue.worker
Args:
next_queue: queue containing the responses
write_queue: queue to put the list of asyncio.Futures on
queue: queue containing the requests to be made. It is used to know when to finish this task
func: function to use in the process pool. This is self.parse
Returns:
None
"""
pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8))
# pool = concurrent.futures.ProcessPoolExecutor(1) # useful for debugging
loop = asyncio.get_running_loop()
while True:
await asyncio.sleep(consts.ASYNC_SLEEP)
if not next_queue.empty():
value = await next_queue.get()
if not func:
func = value.pop('parse_func')
futs = loop.run_in_executor(pool, partial(func, **value))
await write_queue.put(futs)
func = None
# futures.append(loop.run_in_executor(pool, partial(func, **value)))
next_queue.task_done()
# if not queue._unfinished_tasks and not next_queue._unfinished_tasks:
# break
pool.shutdown() # not very useful...
| 2.203125 | 2 |
qaboard/runners/celery_app.py | Samsung/qaboard | 51 | 12799009 | <reponame>Samsung/qaboard
import os
import subprocess
from celery import Celery
app = Celery('celery_app')
app.conf.update(
broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'),
result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'),
task_serializer='pickle',
accept_content=['pickle'],
result_serializer='pickle',
enable_utc=True,
)
from qaboard.config import config
celery_config = config.get('runners', {}).get('celery', {})
app.conf.update(**celery_config)
@app.task(bind=True, name=celery_config.get('qaboard_task_name', "qaboard"))
def start(self, job, cwd=None):
# https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info
print('Executing task id {0.id}, groupID: {0.group}'.format(self.request))
pipe = subprocess.PIPE
with subprocess.Popen(job.run_context.command, shell=True,
encoding='utf-8',
# Avoid issues with code outputing malformed unicode
# https://docs.python.org/3/library/codecs.html#error-handlers
errors='surrogateescape',
cwd=cwd if cwd else job.run_context.job_options['cwd'],
stdout=pipe, stderr=pipe) as process:
for line in process.stdout:
print(line, end='')
process.wait()
return process.returncode
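# Illustrative usage (added): a producer would enqueue a run roughly like
#   start.apply_async(args=[job], kwargs={"cwd": None})
# where `job` carries the run_context expected above; queue/routing settings come
# from the project's runners/celery configuration as loaded into celery_config.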
| 2.140625 | 2 |
src/tf_frodo/__init__.py | xarion/tf_frodo | 1 | 12799010 | from .frodo import FRODO
| 1.046875 | 1 |
day10/python/subesokun/main.py | matason/aoc-2018 | 17 | 12799011 | <gh_stars>10-100
INPUT_FILE_NAME = 'input.txt'
def parseSpotInput(text):
tmp_split = text.split('>')
pos_str, velo_str = tmp_split[0], tmp_split[1]
pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')])
velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')])
return [pos, velo]
def hasSpotAt(x, y, spots):
for spot in spots:
if spot[0] == (x,y):
return True
return False
def printSpots(spots):
min_x = min(spots, key = lambda s: s[0][0])[0][0]
max_x = max(spots, key = lambda s: s[0][0])[0][0]
min_y = min(spots, key = lambda s: s[0][1])[0][1]
max_y = max(spots, key = lambda s: s[0][1])[0][1]
for y in range(min_y, max_y + 1):
print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for x in range(min_x, max_x + 1)]))
def tick(spots, dir=1):
for spot in spots:
spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] + dir * spot[1][1])
def searchAndPrint(spots):
current_min_delta_y = None
current_second = 0
while True:
tick(spots)
current_second += 1
min_y = min(spots, key = lambda s: s[0][1])[0][1]
max_y = max(spots, key = lambda s: s[0][1])[0][1]
min_delta_y = max_y - min_y
if current_min_delta_y == None or min_delta_y < current_min_delta_y:
current_min_delta_y = min_delta_y
else:
tick(spots, -1)
printSpots(spots)
return current_second - 1
spots = []
with open(INPUT_FILE_NAME) as input_file:
for line in input_file:
spots.append(parseSpotInput(line.rstrip()))
print('Solution to part 1:')
seconds = searchAndPrint(spots)
print('Solution to part 2: %i' % (seconds,)) | 3.09375 | 3 |
register_api/views.py | BrenonOrtega/bug_catcher | 0 | 12799012 | <filename>register_api/views.py
from django.conf import settings
from django.contrib.auth.models import User
from django.http.response import Http404
from django.contrib.auth import get_user
from django.shortcuts import render
from rest_framework import status, mixins
from rest_framework.response import Response
from rest_framework.views import APIView, View
from .models import Bug
from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer
class BugList(APIView):
def get(self, request):
bug = Bug.objects.all()
serializer = BugReadSerializer(bug, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
""" def post(self, request):
serializer = BugWriteSerializer(data=request.data) """
def post(self, request):
author = get_user(request)
if author.is_authenticated :
serializer = BugWriteSerializer(data = request.data)
if serializer.is_valid():
serializer.save(author=author)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
else: return Response(status=status.HTTP_403_FORBIDDEN)
class BugDetails(APIView, mixins.DestroyModelMixin):
def get_object(self, pk):
try:
bug_object = Bug.objects.get(pk=pk)
return bug_object
except:
raise Http404
def get(self, request, pk):
bug = self.get_object(pk)
serializer = BugReadSerializer(bug)
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, pk):
bug = self.get_object(pk)
serializer = BugWriteSerializer(bug, request.data)
modifier = get_user(request)
if modifier.is_authenticated:
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
else: return Response(status=status.HTTP_403_FORBIDDEN)
#####################################################################################
class BugListPage(View):
def get(self, request):
bugs = Bug.objects.all()
context = {"bugs": bugs}
return render(request, "home.html", context)
class GetUserBugs(APIView):
def get(self, request):
user = User.objects.all()
serializer = UserReadSerializer(user, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
| 2.21875 | 2 |
resources/tools/presentation/pal.py | nidhyanandhan/csit | 0 | 12799013 | <gh_stars>0
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSIT Presentation and analytics layer.
"""
import sys
import argparse
import logging
from pal_errors import PresentationError
from environment import Environment, clean_environment
from specification_parser import Specification
from input_data_parser import InputData
from generator_tables import generate_tables
from generator_plots import generate_plots
from generator_files import generate_files
from static_content import prepare_static_content
from generator_report import generate_report
from generator_cpta import generate_cpta
from generator_alerts import Alerting, AlertingError
def parse_args():
"""Parse arguments from cmd line.
:returns: Parsed arguments.
:rtype: ArgumentParser
"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
u"-s", u"--specification",
required=True,
type=argparse.FileType(u'r'),
help=u"Specification YAML file."
)
parser.add_argument(
u"-r", u"--release",
default=u"master",
type=str,
help=u"Release string of the product."
)
parser.add_argument(
u"-w", u"--week",
default=u"1",
type=str,
help=u"Calendar week when the report is published."
)
parser.add_argument(
u"-l", u"--logging",
choices=[u"DEBUG", u"INFO", u"WARNING", u"ERROR", u"CRITICAL"],
default=u"ERROR",
help=u"Logging level."
)
parser.add_argument(
u"-f", u"--force",
action=u"store_true",
help=u"Force removing the old build(s) if present."
)
parser.add_argument(
u"-o", u"--print-all-oper-data",
action=u"store_true",
help=u"Print all operational data to console. Be careful, the output "
u"can be really long."
)
parser.add_argument(
u"-i", u"--input-file",
type=str,
default=u"",
help=u"XML file generated by RobotFramework which will be processed "
u"instead of downloading the data from Nexus and/or Jenkins. In "
u"this case, the section 'input' in the specification file is "
u"ignored."
)
parser.add_argument(
u"-d", u"--input-directory",
type=str,
default=u"",
help=u"Directory with XML file(s) generated by RobotFramework or with "
u"sub-directories with XML file(s) which will be processed "
u"instead of downloading the data from Nexus and/or Jenkins. In "
u"this case, the section 'input' in the specification file is "
u"ignored."
)
return parser.parse_args()
def main():
"""Main function."""
log_levels = {u"NOTSET": logging.NOTSET,
u"DEBUG": logging.DEBUG,
u"INFO": logging.INFO,
u"WARNING": logging.WARNING,
u"ERROR": logging.ERROR,
u"CRITICAL": logging.CRITICAL}
args = parse_args()
logging.basicConfig(format=u"%(asctime)s: %(levelname)s: %(message)s",
datefmt=u"%Y/%m/%d %H:%M:%S",
level=log_levels[args.logging])
logging.info(u"Application started.")
try:
spec = Specification(args.specification)
spec.read_specification()
except PresentationError:
logging.critical(u"Finished with error.")
return 1
if spec.output[u"output"] not in (u"none", u"report", u"trending"):
logging.critical(
f"The output {spec.output[u'output']} is not supported."
)
return 1
ret_code = 1
try:
env = Environment(spec.environment, args.force)
env.set_environment()
prepare_static_content(spec)
data = InputData(spec)
if args.input_file:
data.process_local_file(args.input_file)
elif args.input_directory:
data.process_local_directory(args.input_directory)
else:
data.download_and_parse_data(repeat=1)
if args.print_all_oper_data:
data.print_all_oper_data()
generate_tables(spec, data)
generate_plots(spec, data)
generate_files(spec, data)
if spec.output[u"output"] == u"report":
generate_report(args.release, spec, args.week)
elif spec.output[u"output"] == u"trending":
sys.stdout.write(generate_cpta(spec, data))
try:
alert = Alerting(spec)
alert.generate_alerts()
except AlertingError as err:
logging.warning(repr(err))
else:
logging.info("No output will be generated.")
logging.info(u"Successfully finished.")
ret_code = 0
except AlertingError as err:
logging.critical(f"Finished with an alerting error.\n{repr(err)}")
except PresentationError as err:
logging.critical(f"Finished with a PAL error.\n{str(err)}")
except (KeyError, ValueError) as err:
logging.critical(f"Finished with an error.\n{repr(err)}")
finally:
if spec is not None:
clean_environment(spec.environment)
return ret_code
if __name__ == u"__main__":
sys.exit(main())
| 2.078125 | 2 |
machine_learning/classification.py | soikkea/python-algorithms | 0 | 12799014 | """Classification methods."""
import numpy as np
from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED
from machine_learning.utilities import k_fold_split_indexes, get_k_nn
def classification(method, error_func, train, test, **kwargs):
"""Perform classification for data and return error.
Arguments:
method {function} -- Classification method.
error_func {function} -- Error function.
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
All extra keyword arguments are passed to method.
Returns:
float -- Error value returned by error_func.
"""
y_pred = method(train, test, **kwargs)
return error_func(y_pred, test.y.values)
def max_classifier(train, test):
"""Maximum classifier.
Classifies using the most common class in training data.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Returns:
ndarray -- Predicted values.
"""
max_category = max_classifier_fit(train.X, train.y)
y_pred = max_classifier_predict(test.X, max_category)
return y_pred
def max_classifier_fit(X, y):
"""Determines the most common class in input.
Arguments:
        X {DataFrame} -- Independent variables.
y {DataFrame} -- Dependent variable.
Returns:
int -- Most common class.
"""
y = y.values
max_category = np.bincount(y.astype(int)).argmax()
return max_category
def max_classifier_predict(X, max_category):
"""Classify using max classifier.
Arguments:
X {DataFrame} -- Independent variables.
max_category {int} -- Class to classify to.
Returns:
ndarray -- Predicted values.
"""
y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category
return y_pred
def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES):
"""Multinomial naive bayes classifier.
See more at:
https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted values.
"""
train_X = train.X.values
train_y = train.y.values
test_X = test.X.values
class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y,
n_classes)
y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods)
return y_pred
def mnb_classifier_fit(X, y, n_classes):
"""Fit MNB classifier.
Calculates class priors and feature likelihoods.
Arguments:
X {ndarray} -- Independent variables.
y {ndarray} -- Dependent variables.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Class priors.
ndarray -- Feature likelihoods.
"""
class_priors = mnb_class_priors(y, n_classes)
feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes)
return class_priors, feature_likelihoods
def mnb_class_priors(y, n_classes):
"""Calculates the logaritm of the probability of belonging to each class.
Arguments:
y {ndarray} -- Class labels.
n_classes {int} -- Number of class labels.
Returns:
ndarray -- Log of prior probabilities.
"""
priors = np.zeros(n_classes)
for c in range(n_classes):
priors[c] = np.log(np.sum(y == c) / y.size)
return priors
def mnb_feature_likelihoods(X, y, n_classes):
"""Calculates the probability of feature j, given class k, using Laplace smoothing.
Arguments:
X {ndarray} -- Features.
y {ndarray} -- Class labels.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Logs of feature likelihoods.
"""
n_features = X.shape[1]
p_ij = np.zeros((n_classes, n_features))
for c in range(n_classes):
Fc_sum = np.sum(X[y == c, :])
for j in range(n_features):
Fnc = np.sum(X[y == c, j])
p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum))
return p_ij
def mnb_classifier_predict(X, class_priors, feature_likelihoods):
"""Classify using MNB classifier.
Arguments:
X {ndarray} -- Independent variables.
class_priors {ndarray} -- Class priors.
feature_likelihoods {ndarray} -- Feature likelihoods.
Returns:
ndarray -- Predicted values.
"""
n_classes = class_priors.size
N = X.shape[0]
posterior = np.zeros((N, n_classes))
for i in range(N):
posterior[i, :] = feature_likelihoods.dot(X[i, :])
for c in range(n_classes):
posterior[:, c] = posterior[:, c] + class_priors[c]
y_pred = np.argmax(posterior, axis=1)
return y_pred
def k_nn_classifier(train, test, k):
"""K-nearest neighbors classifier.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
k {int} -- Value for k.
Returns:
ndarray -- Predicted values.
"""
y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k)
return y_pred
def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K):
"""'Fit' K-nearest neighbors classifier by finding optimal value for k using cross validation.
Arguments:
train {DataTuple} -- Training data.
Keyword Arguments:
n_folds {int} -- Number of folds to use for validation. (default: {FOLDS})
max_k {int} -- Maximum value for k. (default: {MAX_K})
Returns:
int -- Optimal value for k.
float -- Error for selected k.
"""
# TODO: combine with k_nn_regression_fit()?
X = train.X.values
y = train.y.values
N = X.shape[0]
folds = k_fold_split_indexes(N, n_folds)
min_error = np.infty
best_k = 1
for k in range(1, max_k):
errors = np.zeros(n_folds)
for i in range(n_folds):
tmp_folds = folds[:]
valid_ix = tmp_folds.pop(i)
train_ix = np.concatenate(tmp_folds)
y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :],
y[train_ix], k)
error = classification_error(y_pred, y[valid_ix])
errors[i] = (valid_ix.size * error)
mean_error = np.sum(errors) / N
if mean_error < min_error:
min_error = mean_error
best_k = k
return int(best_k), min_error
def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES):
"""Classify using K-nearest neighbors classifier.
Assigns class labels based on the most common class in k-nearest neighbors.
Arguments:
X {DataFrame} -- Independent variables.
X_train {DataFrame} -- Independent training variables.
y_train {DataFrame} -- Dependent training variables.
k {int} -- Value of k.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted variables.
"""
try:
X = X.values
except AttributeError:
pass
try:
X_train = X_train.values
except AttributeError:
pass
try:
y_train = y_train.values
except AttributeError:
pass
assert X.shape[1] == X_train.shape[1]
N = X.shape[0]
y_pred = np.zeros((N, 1))
for i in range(N):
point = X[i, :]
neighbors, _ = get_k_nn(point, X_train, k)
train_labels = y_train[neighbors]
class_sums = [np.sum(train_labels == i) for i in range(n_classes)]
y_pred[i] = k_nn_assign_label(class_sums)
return y_pred
def k_nn_assign_label(class_sums):
"""Assing label according the most common class.
If there are multiple candidates, pick one randomly.
Arguments:
class_sums {list} -- Class frequencies.
Returns:
        int -- Assigned class label.
"""
order = np.argsort(class_sums)[::-1]
    candidates = [c for c in order if class_sums[c] == class_sums[order[0]]]
return np.random.RandomState(RANDOM_SEED).choice(candidates)
def classification_error(y_pred, y_true):
"""Return classification error.
    Sum of incorrectly assigned classes divided by the number of points.
Arguments:
y_pred {ndarray} -- Predicted values.
y_true {ndarray} -- True values.
Returns:
float -- Error.
"""
y_true = y_true.reshape(y_pred.shape)
return np.sum(y_pred.astype(np.int)
!= y_true.astype(np.int)) / float(y_pred.size)
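if __name__ == "__main__":
    # Illustrative sketch (added): runs the MNB and k-NN predictors on tiny synthetic
    # arrays; assumes the machine_learning package (constants/utilities) is importable.
    rng = np.random.RandomState(0)
    X_train = rng.randint(0, 5, size=(20, 4)).astype(float)
    y_train = np.array([0] * 10 + [1] * 10)
    X_test = rng.randint(0, 5, size=(5, 4)).astype(float)
    priors, likelihoods = mnb_classifier_fit(X_train, y_train, n_classes=2)
    print("MNB:", mnb_classifier_predict(X_test, priors, likelihoods))
    knn_pred = k_nn_classifier_predict(X_test, X_train, y_train, k=3, n_classes=2)
    print("3-NN:", knn_pred.ravel())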
| 3.65625 | 4 |
test/unit/test_providers_raw_master_key_config.py | farleyb-amazon/aws-encryption-sdk-python | 95 | 12799015 | <filename>test/unit/test_providers_raw_master_key_config.py<gh_stars>10-100
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig"""
import pytest
import six
from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm
from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey
from aws_encryption_sdk.key_providers.base import MasterKeyConfig
from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig
from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs
pytestmark = [pytest.mark.unit, pytest.mark.local]
STATIC_WRAPPING_KEY = WrappingKey(
wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,
wrapping_key=b"_________a symmetric key________",
wrapping_key_type=EncryptionKeyType.SYMMETRIC,
)
VALID_KWARGS = {
RawMasterKeyConfig: [
dict(key_id=b"a raw key", provider_id="a provider", wrapping_key=STATIC_WRAPPING_KEY),
dict(key_id=b"a raw key", provider_id=b"a provider", wrapping_key=STATIC_WRAPPING_KEY),
]
}
@pytest.mark.parametrize("cls, kwargs", all_valid_kwargs(VALID_KWARGS))
def test_attributes_valid_kwargs(cls, kwargs):
cls(**kwargs)
@pytest.mark.parametrize("cls, kwargs", all_invalid_kwargs(VALID_KWARGS))
def test_attributes_invalid_kwargs(cls, kwargs):
with pytest.raises(TypeError):
cls(**kwargs)
def test_parent():
assert issubclass(RawMasterKeyConfig, MasterKeyConfig)
@pytest.mark.parametrize("cls, kwargs", all_valid_kwargs(VALID_KWARGS))
def test_attributes_converts(cls, kwargs):
test = cls(**kwargs)
assert isinstance(test.provider_id, six.string_types)
| 2.015625 | 2 |
spatiotemporal/db/functions.py | ResonantGeoData/django-image-annotations | 0 | 12799016 | """Django database functions.
This module supplements Django's own coverage of Postgres
and PostGIS functions.
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1
"""
from django.contrib.gis.db.models import GeometryField, LineStringField, PointField
from django.db.models import FloatField, Func
class Box3D(Func):
"""Compute the 3D bounding box of a geometry."""
function = "Box3D"
output_field = GeometryField()
class XMax(Func):
"""Returns the X maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_XMax"
output_field = FloatField()
class XMin(Func):
"""Returns the X minima of a 2D or 3D bounding box or a geometry."""
function = "ST_XMin"
output_field = FloatField()
class YMax(Func):
"""Returns the Y maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_YMax"
output_field = FloatField()
class YMin(Func):
"""Returns the Y minima of a 2D or 3D bounding box or a geometry."""
function = "ST_YMin"
output_field = FloatField()
class ZMax(Func):
"""Returns the Z maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_ZMax"
output_field = FloatField()
class ZMin(Func):
"""Returns the Z minima of a 2D or 3D bounding box or a geometry."""
function = "ST_ZMin"
output_field = FloatField()
class MakePoint(Func):
"""Compute the pixel type of the first band of a raster."""
function = "ST_MakePoint"
output_field = PointField(srid=0)
class MakeLine(Func):
"""Compute the pixel type of the first band of a raster."""
function = "ST_MakeLine"
output_field = LineStringField(srid=0)
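# Illustrative usage (added; model and field names below are placeholders):
#   SomeModel.objects.annotate(bbox=Box3D("geom")).annotate(x_max=XMax("bbox"))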
| 2.75 | 3 |
reskit/util/leap_day.py | OfficialCodexplosive/RESKit | 16 | 12799017 | import numpy as np
import pandas as pd
from . import ResError
def remove_leap_day(timeseries):
"""Removes leap days from a given timeseries
Parameters
----------
timeseries : array_like
The time series data to remove leap days from
* If something array_like is given, the length must be 8784
* If a pandas DataFrame or Series is given, time indexes will be used
directly
Returns
-------
Array
"""
if isinstance(timeseries, np.ndarray):
if timeseries.shape[0] == 8760:
return timeseries
elif timeseries.shape[0] == 8784:
times = pd.date_range("01-01-2000 00:00:00",
"12-31-2000 23:00:00", freq="H")
sel = np.logical_and((times.day == 29), (times.month == 2))
if len(timeseries.shape) == 1:
return timeseries[~sel]
else:
return timeseries[~sel, :]
else:
raise ResError('Cannot handle array shape ' + str(timeseries.shape))
elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame):
times = timeseries.index
sel = np.logical_and((times.day == 29), (times.month == 2))
if isinstance(timeseries, pd.Series):
return timeseries[~sel]
else:
return timeseries.loc[~sel]
else:
return remove_leap_day(np.array(timeseries))
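# Usage sketch: hourly data for a leap year (8784 rows) comes back with the 24
# February 29 hours dropped (8760 rows); non-leap-year input is returned unchanged.
#
#   import pandas as pd
#   idx = pd.date_range("2000-01-01 00:00", "2000-12-31 23:00", freq="H")
#   cleaned = remove_leap_day(pd.Series(range(len(idx)), index=idx))
#   assert len(cleaned) == 8760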
| 3.96875 | 4 |
accounts/migrations/0002_auto_20190727_1159.py | Alwin1847207/Hackathon | 0 | 12799018 | # Generated by Django 2.2.3 on 2019-07-27 06:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1000)),
('desc', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_usr', models.IntegerField()),
('phone', models.CharField(max_length=15)),
],
),
migrations.AddField(
model_name='registration',
name='userType',
field=models.CharField(blank=True, max_length=50),
),
]
| 1.867188 | 2 |
pipekit/__init__.py | DrDub/pipekit | 3 | 12799019 | <reponame>DrDub/pipekit
#!/usr/bin/env python3
from .pipe import NullPipe  # noqa: F401  (re-exported for convenience)
| 1.039063 | 1 |
djforms/alumni/urls.py | carthagecollege/django-djforms | 1 | 12799020 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from django.urls import path
from django.views.generic import TemplateView
from djforms.alumni.classnotes import views as classnotes
from djforms.alumni.distinguished import views as distinguished
from djforms.alumni.memory import views as memory
urlpatterns = [
path(
'success/',
TemplateView.as_view(template_name='alumni/data_entered.html'),
),
# classnotes
path(
'classnotes/carthaginian/',
classnotes.screenscrape,
name='classnotes_carthaginian',
),
path(
'classnotes/success/',
TemplateView.as_view(template_name='alumni/classnotes/done.html'),
name='classnotes_success',
),
path(
'classnotes/archives/<int:year>/',
classnotes.archives,
name='classnotes_archives_year',
),
path(
'classnotes/archives/',
classnotes.archives,
name='classnotes_archives',
),
path(
'classnotes/inmemoriam/',
classnotes.obits,
name='classnotes_obits',
),
path(
'classnotes/',
classnotes.contact,
name='classnotes_form',
),
# distinguised alumni nomination
path(
'distinguished/nomination/success/',
TemplateView.as_view(template_name='alumni/data_entered.html'),
name='distinguished_nomination_success',
),
path(
'distinguished/nomination/',
distinguished.nomination_form,
name='distinguished_nomination_form',
),
# fond memories
path(
'memory/success/',
TemplateView.as_view(template_name='alumni/memory/done.html'),
name='memory_questionnaire_success',
),
path(
'memory/archives/',
memory.questionnaire_archives,
name='memory_questionnaire_archives',
),
path(
'memory/<int:quid>/detail/',
memory.questionnaire_detail,
name='memory_questionnaire_detail',
),
path(
'memory/<str:campaign>/',
memory.questionnaire_form,
name='memory_questionnaire_promo_form',
),
path(
'memory/',
memory.questionnaire_form,
name='memory_questionnaire_form',
),
]
| 1.859375 | 2 |
utils/messages/create-welsh-translations.py | hmrc/cds-reimbursement-claim-frontend | 1 | 12799021 | import csv
import sys
def write_welsh_translations(csv_file_name, output_file_name):
with open(csv_file_name, newline='') as csv_file:
messages = csv.reader(csv_file, delimiter=',')
output_file = open(output_file_name, 'w+')
# skip headers
for i in range(2):
next(messages, None)
# write translations
try:
invalid = []
for message in messages:
key = message[0].strip()
welsh = message[2].strip()
if not key:
output_file.write('\n')
elif key.startswith('#'):
if len(key) == 1:
output_file.write(key + '===================================================\n')
else:
output_file.write(key + '\n')
elif len(welsh) > 0:
output_file.write('{}={}\n'.format(key, welsh))
else:
invalid.append(message)
print('Finished')
print('Invalid records: ', len(invalid))
for x in invalid:
print('* {}'.format(x))
except IOError:
print("Error writing translations")
output_file.close()
if __name__ == '__main__':
output_file_name = "messages.cy"
if len(sys.argv) < 2:
        print('Error: please provide the source CSV file name (including full path) as a command line argument')
        print('Usage: create-welsh-translations.py [CSV file name] [output file name]\n')
print('Note: output file name is optional, default name is "{}"'.format(output_file_name))
else:
write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) == 3 else output_file_name)
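# Input layout sketch (an assumption for illustration): the first two rows are treated as
# headers, column 0 holds the message key, column 2 holds the Welsh text, and any other
# columns (e.g. the English source text) are ignored.
#
#   some.message.key , English source text , Testun Cymraeg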
| 3.390625 | 3 |
dtu-api/dtu_api/model/database.py | demitri/DtU_api | 1 | 12799022 | <reponame>demitri/DtU_api
#!/usr/bin/python
''' This file handles a database connection. It can simply be deleted if not needed.
The example given is for a PostgreSQL database, but can be modified for any other.
'''
import psycopg2
from ..config import AppConfig
from ..designpatterns import singleton
config = AppConfig()
@singleton
class MyApplicationDatabase(object):
def __init__(self):
        self._pool = None  # private attribute so it does not shadow the pool() method below
def pool(self, release):
''' Return the pool of database connections for the database connected. '''
# -----------------------------------
# Database connection setup & methods
# -----------------------------------
# Ref: http://initd.org/psycopg/docs/module.html
# Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool
# dsn = data source name
        if self._pool is None:
db_info = {}
#for key in self.config.options(""):
# db_info[key] = config.
        return self._pool
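    # A possible implementation sketch (an assumption, not part of the original code):
    # build a psycopg2 connection pool from values exposed by AppConfig.
    #
    #   import psycopg2.pool
    #   self._pool = psycopg2.pool.ThreadedConnectionPool(
    #       minconn=1, maxconn=10,
    #       host=db_info.get("host"), port=db_info.get("port"),
    #       dbname=db_info.get("dbname"), user=db_info.get("user"),
    #       password=db_info.get("password"),
    #   )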
### etc. ###
## TODO: create a sample db file for PostgreSQL, SQLite, and SQLAlchemy | 2.53125 | 3 |
polarization/profiles.py | woutergins/polarization | 0 | 12799023 | <reponame>woutergins/polarization<filename>polarization/profiles.py
"""
.. module:: profiles
:platform: Windows
:synopsis: Implementation of classes for different lineshapes,
creating callables for easy and intuitive calculations.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from scipy.special import wofz
from scipy.interpolate import interp1d
__all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt',
'ExtendedVoigt', 'Irrational', 'HyperbolicSquared']
sqrt2 = 2 ** 0.5
sqrt2pi = (2 * np.pi) ** 0.5
sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))
base_e = np.exp(1)
class Profile(object):
def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False):
super(Profile, self).__init__()
self.ampIsArea = ampIsArea
self.fwhm = np.abs(fwhm) if fwhm is not None else np.abs(1.0)
self.mu = mu if mu is not None else 0.0
self.amp = amp if amp is not None else 1.0
def __repr__(self):
s = str(type(self)) + 'FWHM: {}, mu: {}, amp: {}'
s = s.format(self.fwhm, self.mu, self.amp)
return s
def __call__(self, vals):
if self.ampIsArea:
factor = 1.0
else:
factor = self._normFactor
vals = vals / factor
return self.amp * vals
class Gaussian(Profile):
r"""A callable normalized Gaussian profile.
Parameters
----------
fwhm: float
Full Width At Half Maximum, defaults to 1.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Returns
-------
Gaussian
Callable instance, evaluates the Gaussian profile in the arguments
supplied.
Note
----
The used formula is taken from the MathWorld webpage
http://mathworld.wolfram.com/GaussianFunction.html:
.. math::
G(x;\mu, \sigma) &= \frac{\exp\left(-\frac{1}{2}\left(\frac{x-\mu}
{\sigma}\right)^2\right)}{\sqrt{2\pi}\sigma}
FWHM &= s\sqrt{2\ln\left(2\right)}\sigma"""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
super(Gaussian, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
self._fwhm = value
self.sigma = self.fwhm / (sqrt2log2t2)
if not self.ampIsArea:
self._normFactor = (self.sigma * sqrt2pi) ** (-1)
def __call__(self, x):
x = x - self.mu
expPart = np.exp(-0.5 * (x / self.sigma) ** 2)
normPart = self.sigma * sqrt2pi
return super(Gaussian, self).__call__(expPart / normPart)
class Lorentzian(Profile):
r"""A callable normalized Lorentzian profile.
Parameters
----------
fwhm: float
Full Width At Half Maximum, defaults to 1.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Returns
-------
Lorentzian
Callable instance, evaluates the Lorentzian profile in the arguments
supplied.
Note
----
The formula used is taken from the MathWorld webpage
http://mathworld.wolfram.com/LorentzianFunction.html:
.. math::
\mathcal{L}\left(x; \mu, \gamma\right) &= \frac{\gamma}
{\pi\left(\left(x-\mu\right)^2+\gamma^2\right)}
FWHM &= 2\gamma"""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
self._fwhm = value
self.gamma = 0.5 * self.fwhm
if not self.ampIsArea:
self._normFactor = 1.0 / (self.gamma * np.pi)
def __call__(self, x):
x = x - self.mu
topPart = self.gamma
bottomPart = (x ** 2 + self.gamma ** 2) * np.pi
return super(Lorentzian, self).__call__(topPart / bottomPart)
class Voigt(Profile):
r"""A callable normalized Voigt profile.
Parameters
----------
fwhm: list of 2 floats
Full Width At Half Maximum of the components, defaults to 1.
Ordered as Gaussian, then Lorentzian.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Attributes
----------
totalfwhm: float
Approximation of the width based on the underlying widths.
Returns
-------
Voigt
Callable instance, evaluates the Voigt profile in the arguments supplied.
Note
----
The formula used is taken from the Wikipedia webpage
http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva
function, and the values supplied as FWHM are appropriately transformed to
:math:`\sigma` and :math:`\gamma`:
.. math::
V\left(x;\mu, \sigma, \gamma\right) &= \frac{\Re\left[w\left(z\right)
\right]}{\sigma\sqrt{2\pi}}
z&=\frac{x+i\gamma}{\sigma\sqrt{2\pi}}"""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
self._fwhmNorm = np.array([sqrt2log2t2, 2])
super(Voigt, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
if isinstance(value, (list, tuple, np.ndarray)):
seperate = value[0:2]
self.fwhmG, self.fwhmL = seperate
G, L = seperate
self._fwhm = 0.5346 * self.fwhmL + \
(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5
self.sigma, self.gamma = seperate / self._fwhmNorm
else:
self.fwhmG, self.fwhmL = value, value
self._fwhm = 0.6144031129489123 * value
self.sigma, self.gamma = self._fwhm / self._fwhmNorm
if not self.ampIsArea:
z = (0 + 1j * self.gamma) / (self.sigma * sqrt2)
top = wofz(z).real / (self.sigma * sqrt2pi)
self._normFactor = top
def __call__(self, x):
x = x - self.mu
z = (x + 1j * self.gamma) / (self.sigma * sqrt2)
top = wofz(z).real / (self.sigma * sqrt2pi)
return super(Voigt, self).__call__(top)
class Irrational(Profile):
r"""A callable normalized Irrational profile.
Parameters
----------
fwhm: float
Full Width At Half Maximum, defaults to 1.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Returns
-------
Irrational
Callable instance, evaluates the irrational profile in the arguments
supplied.
Note
----
The used formula is taken from T. Ida et al. :cite:`Ida2000`,
code inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`.
.. math::
\mathcal{I}\left(x; \mu, g\right) &= \frac{g}{2}\left[1+\left(\frac{x-
\mu}{g}\right)^2\right]^{-3/2}
FWHM &= \sqrt{2^{2/3}-1}g"""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
super(Irrational, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
self._fwhm = value
self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1)
if not self.ampIsArea:
self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma)
def __call__(self, x):
x = x - self.mu
val = ((1.0 + (x / self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma)
return super(Irrational, self).__call__(val)
class HyperbolicSquared(Profile):
r"""A callable normalized HyperbolicSquared profile.
Parameters
----------
fwhm: float
Full Width At Half Maximum, defaults to 1.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Returns
-------
Hyperbolic
Callable instance, evaluates the hyperbolic profile in the arguments
supplied.
Note
----
The used formula is taken from T. Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis of
<NAME> :cite:`Yordanov2007`.
.. math::
H\left(x;\mu, g\right) &= \frac{1}{2g}\cosh^{-2}\left(\frac{x-\mu}{g}
\right)
FWHM &= 2g\ln\left(\sqrt{2}+1\right)"""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
self._fwhm = value
self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1))
if not self.ampIsArea:
self._normFactor = 1.0 / (2 * self.gamma)
def __call__(self, x):
x = x - self.mu
coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2
simplePart = 2 * self.gamma
return super(HyperbolicSquared, self).__call__(coshPart / simplePart)
class PseudoVoigt(Profile):
r"""A callable normalized PseudoVoigt profile.
Parameters
----------
fwhm: float
Full Width At Half Maximum, defaults to 1.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Returns
-------
PseudoVoigt
Callable instance, evaluates the pseudovoigt profile in the arguments
supplied.
Note
----
The formula used is taken from the webpage
http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the
supplied FWHM is appropriately transformed for the Gaussian and Lorentzian
lineshapes:
.. math::
\mathcal{V}\left(x; \mu, \eta, \sigma, \gamma\right) = \eta \mathcal{L}
(x; \gamma, \mu) + (1-\eta) G(x; \sigma, \mu)"""
def __init__(self, eta=None, fwhm=None, mu=None,
amp=None, **kwargs):
self.L = Lorentzian(**kwargs)
self.G = Gaussian(**kwargs)
self._n = np.abs(eta) if eta is not None else 0.5
if self._n > 1:
self._n = self._n - int(self._n)
super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
self._fwhm = value
self.L.fwhm = value
self.G.fwhm = value
if not self.ampIsArea:
self._normFactor = self.n * self.L(0)
self._normFactor += (1.0 - self.n) * self.G(0)
@property
def n(self):
return self._n
@n.setter
def n(self, value):
value = np.abs(value)
if value > 1:
value = value - int(value)
self._n = value
if not self.ampIsArea:
self._normFactor = self.n * self.L(0)
self._normFactor += (1.0 - self.n) * self.G(0)
def __call__(self, x):
x = x - self.mu
val = self.n * self.L(x) + (1.0 - self.n) * self.G(x)
return super(PseudoVoigt, self).__call__(val)
class ExtendedVoigt(Profile):
r"""A callable normalized extended Voigt profile.
Parameters
----------
fwhm: list of 2 floats
Full Width At Half Maximum, defaults to 1, ordered as Gaussian and
Lorentzian width.
mu: float
Location of the center, defaults to 0.
amp: float
Amplitude of the profile, defaults to 1.
Attributes
----------
totalfwhm: float
Approximation of the total width, based on the underlying widths.
Returns
-------
ExtendedVoigt
Callable instance, evaluates the extended Voigt profile in the arguments
supplied.
Note
----
Formula taken from <NAME> et al. :cite:`Ida2000`, code
inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`.
This class uses a weighted sum of the Gaussian,
Lorentzian, Irrational and HyperbolicSquared profiles."""
def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):
self.kwargs = kwargs
super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu,
amp=amp, **kwargs)
@property
def fwhm(self):
return self._fwhm
@fwhm.setter
def fwhm(self, value):
if isinstance(value, (list, tuple, np.ndarray)):
seperate = value[0:2]
self.fwhmG, self.fwhmL = seperate
self._fwhm = 0.5346 * self.fwhmL + \
np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2)
else:
self.fwhmG, self.fwhmL = value, value
self._fwhm = 0.6144031129489123 * value
self.setParams()
def setParams(self):
a = np.array(
[-2.95553, 8.48252, -9.48291,
4.74052, -1.24984, 0.15021, 0.66])
b = np.array(
[3.19974, -16.50453, 29.14158,
-23.45651, 10.30003, -1.25693, -0.42179])
c = np.array(
[-17.80614, 57.92559, -73.61822,
47.06071, -15.36331, 1.43021, 1.19913])
d = np.array(
[-1.26571, 4.05475, -4.55466,
2.76622, -0.68688, -0.47745, 1.10186])
f = np.array(
[3.7029, -21.18862, 34.96491,
-24.10743, 9.3155, -1.38927, -0.30165])
g = np.array(
[9.76947, -24.12407, 22.10544,
-11.09215, 3.23653, -0.14107, 0.25437])
h = np.array(
[-10.02142, 32.83023, -39.71134,
23.59717, -9.21815, 1.50429, 1.01579])
self.rho = self.fwhmL / (self.fwhmL + self.fwhmG)
self.wG = np.polyval(a, self.rho)
self.wL = np.polyval(b, self.rho)
self.wI = np.polyval(c, self.rho)
self.wH = np.polyval(d, self.rho)
self.nL = np.polyval(f, self.rho)
self.nI = np.polyval(g, self.rho)
self.nH = np.polyval(h, self.rho)
        s = self.fwhmG + self.fwhmL  # total width (Gaussian + Lorentzian FWHM) used to scale the component widths
        self.wG = s * (1 - self.rho * self.wG)
self.wL = s * (1 - (1 - self.rho) * self.wL)
self.wI = s * self.wI
self.wH = s * self.wH
self.nL = self.rho * (1 + (1 - self.rho) * self.nL)
self.nI = self.rho * (1 - self.rho) * self.nI
self.nH = self.rho * (1 - self.rho) * self.nH
self.G = Gaussian(fwhm=self.wG, **self.kwargs)
self.L = Lorentzian(fwhm=self.wL, **self.kwargs)
self.I = Irrational(fwhm=self.wI, **self.kwargs)
self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs)
self.fwhmV = (self.fwhmG ** 5 +
2.69269 * (self.fwhmG ** 4) * self.fwhmL +
2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2) +
4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3) +
0.07842 * self.fwhmG * (self.fwhmL ** 4) +
self.fwhmL ** 5
) ** (1.0 / 5)
if not self.ampIsArea:
Gauss = (1 - self.nL - self.nI - self.nH) * self.G(0)
Lorentz = self.nL * self.L(0)
Irrat = self.nI * self.I(0)
Hyper = self.nH * self.H(0)
val = Gauss + Lorentz + Irrat + Hyper
self._normFactor = val
def __call__(self, x):
x = x - self.mu
Gauss = (1 - self.nL - self.nI - self.nH) * self.G(x)
Lorentz = self.nL * self.L(x)
Irrat = self.nI * self.I(x)
Hyper = self.nH * self.H(x)
val = Gauss + Lorentz + Irrat + Hyper
return super(ExtendedVoigt, self).__call__(val)
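# Quick usage sketch with the classes defined above: evaluate two profiles on a grid and
# check their peak heights (amp is the peak height because ampIsArea defaults to False).
#
#   x = np.linspace(-10, 10, 2001)
#   g = Gaussian(fwhm=2.0, mu=0.0, amp=1.0)
#   v = Voigt(fwhm=[1.5, 1.0], mu=0.0, amp=1.0)
#   print(g(x).max(), v(x).max())   # both approximately 1.0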
| 2.78125 | 3 |
blueprints/tdmaps/tdmaps.py | filipecosta90/im2modelpy_api | 0 | 12799024 | from flask import Flask, render_template, request, redirect, Blueprint
from flask_jsonpify import jsonpify
from werkzeug import secure_filename
import urllib.request
import urllib.error
import urllib.parse
import os
import json
import sys
import ase.io.cif
from ase import Atoms
import zlib
import sqlite3
import ntpath
import numpy as np
from matplotlib import pyplot as plt
import sys
#so that we can import globals
sys.path.append('../..')
from globals import *
#backgroud save_cells_unitcells_data job
def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema):
inserted_key = None
sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema)
sql = '''insert into tdmaps ( id ) VALUES( ? );'''
id_key = None
sqlite3_conn.execute( sql,[ id_key ] )
sqlite3_conn.commit()
cur = sqlite3_conn.cursor()
sql_select = "SELECT last_insert_rowid();"
cur.execute(sql_select)
result_string = cur.fetchone()
if result_string:
inserted_key = result_string[0]
sqlite3_conn.close()
return inserted_key
def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ):
result = None
inserted_key = None
    sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema)
cur = sqlite3_conn.cursor()
sql = "SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}".format(id)
cur.execute(sql)
result_binary = cur.fetchone()
if result_binary:
result = {
'id': result_binary[0],
'exp_setup_conf_id': result_binary[1],
'cells_conf_id': result_binary[2],
'slices_conf_id': result_binary[3],
'waves_conf_id': result_binary[4],
'dats_conf_id': result_binary[5],
'simgrids_conf_id': result_binary[6]
}
sqlite3_conn.close()
return result
#####################################################
##################### BLUEPRINT #####################
#####################################################
tdmaps = Blueprint(
'tdmaps', #name of module
__name__,
template_folder='templates' # templates folder
)
@tdmaps.route('/')
def index_tdmaps():
return render_template('index_tdmaps.html')
@tdmaps.route('/<string:tdmapid>', methods = ['GET'])
def api_tdmaps_get(tdmapid):
global apiVersion
global tdmapsDBPath
global tdmapsDBSchema
status = None
result = None
data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema)
if data is None:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"error" : {
"code": 404,
"message": "Something went wrong.",
"url": request.url,
},
}
return_code = 404
else:
tdmap_link = "{0}api/tdmaps/{1}".format( request.host_url, tdmapid )
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"data" : data,
"links" : { "tdmap" : { "self" : tdmap_link } },
}
return_code = 200
return jsonpify(result), return_code
@tdmaps.route('/setup', methods = ['POST'])
def api_tdmaps_setup():
global apiVersion
global tdmapsDBPath
global tdmapsDBSchema
status = None
data_dict = None
if len(request.data) > 0:
data_dict = json.loads( request.data )
data = {}
inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema)
tdmap_link = "{0}api/tdmaps/{1}".format( request.host_url, inserted_tdmap_id )
status = True
data = { 'id' : inserted_tdmap_id }
if status is None:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"error" : {
"code": 404,
"message": "Something went wrong.",
"url": request.url,
},
}
return_code = 404
else:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"data" : data,
"links" : { "tdmap" : { "self" : tdmap_link } },
}
return_code = 200
return jsonpify(result), return_code
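# Endpoint sketch, assuming the blueprint is registered under /api/tdmaps (consistent with
# the links built above) and Flask's default port:
#
#   curl -X POST http://localhost:5000/api/tdmaps/setup -H 'Content-Type: application/json' -d '{}'
#   curl http://localhost:5000/api/tdmaps/1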
| 2.234375 | 2 |
src/localmodule.py | lostanlen/ismir2018-lbd | 6 | 12799025 | from joblib import Memory
import math
import music21 as m21
import numpy as np
import os
from scipy.fftpack import fft, ifft
def get_composers():
return ["Haydn", "Mozart"]
def get_data_dir():
return "/scratch/vl1019/nemisig2018_data"
def get_dataset_name():
return "nemisig2018"
def concatenate_layers(Sx, depth):
layers = []
for m in range(depth+1):
layers.append(Sx[m].flatten())
return np.concatenate(layers)
def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16):
N = 2**J_fr
filterbank = np.zeros((N, 1, 2*(J_fr-2)+1))
for j in range(J_fr-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[:, 0, j] = psi
for j in range(J_fr-2, 2*(J_fr-2)):
psi = filterbank[:, 0, j - (J_fr-2)]
rev_psi = np.concatenate((psi[0:1], psi[1:][::-1]))
filterbank[:, 0, j] = rev_psi
sigma_phi = 2.0 * sigma * 2**(-(J_fr-2))
center_phi = 0.0
den_phi = sigma_phi * sigma_phi * N * N
phi = gabor(center_phi, den_phi, N)
rev_phi = np.concatenate((phi[0:1], phi[1:][::-1]))
phi = phi + rev_phi
phi[0] = 1.0
filterbank[:, 0, -1] = phi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def gabor(center, den, N):
omegas = np.array(range(N))
return gauss(omegas - center, den)
def gauss(omega, den):
return np.exp(- omega*omega / den)
def is_even(n):
return (n%2 == 0)
def morlet(center, den, N, n_periods):
half_N = N >> 1
p_start = - ((n_periods-1) >> 1) - is_even(n_periods)
p_stop = ((n_periods-1) >> 1) + 1
omega_start = p_start * N
omega_stop = p_stop * N
omegas = np.array(range(omega_start, omega_stop))
gauss_center = gauss(omegas - center, den)
corrective_gaussians = np.zeros((N*n_periods, n_periods))
for p in range(n_periods):
offset = (p_start + p) * N
corrective_gaussians[:, p] = gauss(omegas - offset, den)
p_range = range(p_start, p_stop)
b = np.array([gauss(p*N - center, den) for p in p_range])
A = np.array([gauss((q-p)*N, den)
for p in range(n_periods)
for q in range(n_periods)]).reshape(n_periods, n_periods)
corrective_factors = np.linalg.solve(A, b)
y = gauss_center - np.dot(corrective_gaussians, corrective_factors)
y = np.fft.fftshift(y)
y = np.reshape(y, (n_periods, N))
y = np.sum(y, axis=0)
return y
def scatter(U, filterbank, dim):
U_ft = fft(U, axis=dim)
U_ft = np.expand_dims(U_ft, axis=-1)
Y_ft = U_ft * filterbank
Y = ifft(Y_ft, axis=dim)
return Y
def setup_timefrequency_scattering(J_tm, J_fr, depth):
filterbanks_tm = []
filterbanks_fr = []
for m in range(depth):
filterbank_tm = temporal_filterbank(2*m, J_tm)
filterbank_fr = frequential_filterbank(2*m+1, J_fr)
filterbanks_tm.append(filterbank_tm)
filterbanks_fr.append(filterbank_fr)
return (filterbanks_tm, filterbanks_fr)
def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16):
N = 2**J_tm
filterbank = np.zeros((1, N, J_tm-2))
for j in range(J_tm-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[0, :, j] = psi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def temporal_scattering(pianoroll, filterbanks, nonlinearity):
depth = len(filterbanks)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0, 1))
filterbank = filterbanks[m]
Y = scatter(U, filterbank, 1)
if nonlinearity == "abs":
U = np.abs(Y)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
def timefrequency_scattering(pianoroll, filterbanks, nonlinearity):
filterbanks_tm = filterbanks[0]
filterbanks_fr = filterbanks[1]
depth = len(filterbanks_tm)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0,1))
filterbank_tm = filterbanks_tm[m]
filterbank_fr = filterbanks_fr[m]
Y_tm = scatter(U, filterbank_tm, 1)
Y_fr = scatter(Y_tm, filterbank_fr, 0)
if nonlinearity == "abs":
U = np.abs(Y_fr)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
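# Usage sketch: the pianoroll is expected as a (2**J_fr, 2**J_tm) array with frequency on
# axis 0 and time on axis 1, matching the filterbank shapes built above.
#
#   J_tm, J_fr, depth = 6, 5, 2
#   filterbanks = setup_timefrequency_scattering(J_tm, J_fr, depth)
#   pianoroll = np.random.rand(2**J_fr, 2**J_tm)
#   Ss = timefrequency_scattering(pianoroll, filterbanks, nonlinearity="abs")
#   features = concatenate_layers(Ss, depth)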
| 2.28125 | 2 |
tests/test_web.py | natumbri/mopidy-bandcamp | 16 | 12799026 | <filename>tests/test_web.py
from unittest import mock
import tornado.testing
import tornado.web
import tornado.websocket
import mopidy.config as config
from mopidy_bandcamp import Extension
class BaseTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
extension = Extension()
self.config = config.Proxy({})
return tornado.web.Application(extension.factory(self.config, mock.Mock()))
class WebHandlerTest(BaseTest):
def test_index(self):
response = self.fetch("/", method="GET")
assert response.code == 200
response = self.fetch("/?url=https%3A%2F%2Fgoogle.com%2F", method="GET")
assert response.code == 200
body = tornado.escape.to_unicode(response.body)
assert "<title>Error</title>" in body
response = self.fetch(
"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age",
method="GET",
)
assert response.code == 200
body = tornado.escape.to_unicode(response.body)
assert "<title>URL Added</title>" in body
| 2.296875 | 2 |
nodes/1.x/python/Material.Properties.py | jdehotin/Clockworkfordynamo | 147 | 12799027 | <gh_stars>100-1000
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
mats = UnwrapElement(IN[0])
colorlist = list()
glowlist = list()
classlist = list()
shinylist = list()
smoothlist = list()
translist = list()
for mat in mats:
colorlist.append(mat.Color)
if mat.Glow:
glowlist.append(True)
else:
glowlist.append(False)
classlist.append(mat.MaterialClass)
shinylist.append(mat.Shininess)
smoothlist.append(mat.Smoothness)
translist.append(mat.Transparency)
OUT = (classlist,colorlist,glowlist,shinylist,smoothlist,translist) | 2.0625 | 2 |
meta_tagger/cms_sitemap.py | dreipol/meta-tagger | 3 | 12799028 | from cms.sitemaps import CMSSitemap
class MetaTagRobotsSiteMap(CMSSitemap):
def items(self):
return super(MetaTagRobotsSiteMap, self).items().exclude(page__metatagpageextension__robots_indexing=False)
| 1.507813 | 2 |
project2/api/simulator.py | fabiorodp/IN_STK5000_Adaptive_methods_for_data_based_decision_making | 0 | 12799029 | ## Comorbidities:
## Asthma, Obesity, Smoking, Diabetes, Heart disease, Hypertension
## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache,
# Pneumonia, Stomach, Myocarditis, Blood-Clots, Death
## Mild symptoms: Taste, Fever, Headache, Stomach
## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots
import numpy as np
import pickle
class Person:
def __init__(self, pop):
self.genes = np.random.choice(2, size=pop.n_genes)
self.gender = np.random.choice(2, 1)
self.age = np.random.gamma(3, 11)
self.age_adj = self.age / 100 # age affects everything
self.income = np.random.gamma(1, 10000)
self.comorbidities = [0] * pop.n_comorbidities
self.comorbidities[0] = pop.asthma
self.comorbidities[1] = pop.obesity * self.age_adj
self.comorbidities[2] = pop.smoking
self.diab = pop.diabetes + self.comorbidities[1] * 0.5
self.HT = pop.htension + self.comorbidities[2] * 0.5
self.comorbidities[3] = self.diab
self.comorbidities[4] = pop.heart * self.age_adj
self.comorbidities[5] = self.HT * self.age_adj
for i in range(pop.n_comorbidities):
if (np.random.uniform() < self.comorbidities[i]):
self.comorbidities[i] = 1
else:
self.comorbidities[i] = 0
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01,
0.02, 0.001, 0.001, 0.001]);
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
self.symptom_baseline[0] = pop.historical_prevalence;
self.symptom_baseline[1] = pop.prevalence;
if (self.gender == 1):
self.symptom_baseline[8] += 0.01
else:
self.symptom_baseline[7] += 0.01
self.symptom_baseline[9] += 0.01
# Initially no symptoms apart from Covid+/CovidPre
self.symptoms = [0] * pop.n_symptoms
if (np.random.uniform() <= self.symptom_baseline[0]):
self.symptoms[0] = 1
# increase symptom probabilities for symptoms when covid+
if (np.random.uniform() <= self.symptom_baseline[1]):
self.symptoms[1] = 1
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05,
0.2, 0.02, 0.05, 0.2, 0.1]);
self.vaccines = [0] * pop.n_vaccines
# use vaccine = -1 if no vaccine is given
def vaccinate(self, vaccine_array, pop):
## Vaccinated
if (sum(vaccine_array) >= 0):
vaccinated = True
else:
vaccinated = False
if (vaccinated):
vaccine = np.argmax(vaccine_array)
self.vaccines = vaccine_array
self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine]
if (vaccinated and self.symptoms[1] == 1):
self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine]
self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine]
self.symptom_baseline[9] *= pop.death_efficacy[vaccine]
if (self.symptoms[0] == 1):
self.symptom_baseline *= 0.5
# baseline symptoms of non-covid patients
if (self.symptoms[0] == 0 and self.symptoms[1] == 0):
self.symptom_baseline = np.array(
[0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001])
## Common side-effects
if (vaccine == 1):
self.symptom_baseline[8] += 0.01
self.symptom_baseline[9] += 0.001
if (vaccine == 2):
self.symptom_baseline[7] += 0.01
if (vaccine >= 0):
self.symptom_baseline[3] += 0.2
self.symptom_baseline[4] += 0.1
# model long covid sufferers by increasing the chances of various
# symptoms slightly
if (self.symptoms[0] == 1 and self.symptoms[1] == 0):
self.symptom_baseline += np.array(
[0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]);
# genetic factors
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
# print("V:", vaccine, symptom_baseline)
for s in range(2, pop.n_symptoms):
if (np.random.uniform() < self.symptom_baseline[s]):
self.symptoms[s] = 1
class Population:
def __init__(self, n_genes, n_vaccines, n_treatments):
self.n_genes = n_genes
self.n_comorbidities = 6;
self.n_symptoms = 10
self.n_vaccines = n_vaccines
self.n_treatments = n_treatments
self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms])
self.G /= sum(self.G)
self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms])
self.asthma = 0.08
self.obesity = 0.3
self.smoking = 0.2
self.diabetes = 0.1
self.heart = 0.15
self.htension = 0.3
self.baseline_efficacy = [0.5, 0.6, 0.7]
self.mild_efficacy = [0.6, 0.7, 0.8]
self.critical_efficacy = [0.8, 0.75, 0.85]
self.death_efficacy = [0.9, 0.95, 0.9]
self.vaccination_rate = [0.7, 0.1, 0.1, 0.1]
self.prevalence = 0.1
self.historical_prevalence = 0.1
## Generates data with the following structure:
## X: characteristics before treatment, including whether or not they
# were vaccinated
## The generated population may already be vaccinated.
def generate(self, n_individuals):
"""Generate a population.
Call this function before anything else is done.
Calling this function again generates a completely new population
sample, purging the previous one from memory.
:param int n_individuals: the number of individuals to generate
"""
self.n_individuals = n_individuals
X = np.zeros([n_individuals,
3 + self.n_genes + self.n_comorbidities
+ self.n_vaccines + self.n_symptoms])
Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms])
self.persons = []
for t in range(n_individuals):
person = Person(self)
vaccine = np.random.choice(4, p=self.vaccination_rate) - 1
vaccine_array = np.zeros(self.n_vaccines)
if (vaccine >= 0):
vaccine_array[vaccine] = 1
person.vaccinate(vaccine_array, self)
self.persons.append(person)
x_t = np.concatenate(
[person.symptoms, [person.age, person.gender, person.income],
person.genes, person.comorbidities, person.vaccines])
X[t, :] = x_t
self.X = X
return X
def vaccinate(self, person_index, vaccine_array):
""" Give a vaccine to a specific person.
Args:
person_index (int array), indices of person in the population
vaccine_array (n*|A| array), array indicating which vaccines are to
be given to each patient
Returns:
The symptoms of the selected individuals
Notes:
Currently only one vaccine dose is implemented, but in the future
multiple doses may be modelled.
"""
outcome = np.zeros([len(person_index), self.n_symptoms])
i = 0
for t in person_index:
self.persons[t].vaccinate(vaccine_array[i], self)
outcome[i] = self.persons[i].symptoms
i += 1
return outcome
def treat(self, person_index, treatment):
""" Treat a patient.
Args:
person_index (int array), indices of persons in the population to treat
treatment_array (n*|A| array), array indicating which treatments are
to be given to each patient
Returns:
The symptoms of the selected individuals
"""
N = len(person_index)
result = np.zeros([N, self.n_symptoms])
# use i to index the treated
# use t to index the original population
# print(treatment)
for i in range(N):
t = person_index[i]
r = np.array(np.matrix(treatment[i]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[i, k] = self.X[t, k]
else:
if (np.random.uniform() < r[k]):
result[i, k] = 0
else:
result[i, k] = self.X[t, k]
return result
def get_features(self, person_index):
x_t = np.concatenate([self.persons[t].symptoms,
[self.persons[t].age, self.persons[t].gender,
self.persons[t].income], self.persons[t].genes,
self.persons[t].comorbidities,
self.persons[t].vaccines])
return x_t
## Treats a population
def treatment(self, X, policy):
treatments = np.zeros([X.shape[0], self.n_treatments])
result = np.zeros([X.shape[0], self.n_symptoms])
for t in range(X.shape[0]):
# print ("X:", result[t])
treatments[t][policy.get_action(X[t])] = 1
r = np.array(np.matrix(treatments[t]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[t, k] = X[t, k]
else:
if (np.random.uniform() < r[k]):
result[t, k] = 0
else:
result[t, k] = X[t, k]
##print("X:", X[t,:self.n_symptoms] , "Y:", result[t])
return treatments, result
# main
if __name__ == "__main__":
import pandas
try:
import policy
except:
import project2.src.covid.policy
n_symptoms = 10
n_genes = 128
n_vaccines = 3
n_treatments = 4
pop = Population(n_genes, n_vaccines, n_treatments)
n_observations = 1000
X_observation = pop.generate(n_observations)
pandas.DataFrame(X_observation).to_csv('observation_features.csv',
header=False, index=False)
n_treated = 1000
X_treatment = pop.generate(n_treated)
X_treatment = X_treatment[X_treatment[:, 1] == 1]
print("Generating treatment outcomes")
a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments))
pandas.DataFrame(X_treatment).to_csv('treatment_features.csv',
header=False, index=False)
pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False,
index=False)
pandas.DataFrame(y).to_csv('treatment_outcomes.csv', header=False,
index=False)
| 2.546875 | 3 |
code_samples/vec2d_use.py | xanewton/PygameCreepsGame | 1 | 12799030 | <reponame>xanewton/PygameCreepsGame
#! /usr/bin/env python
import sys
sys.path.append('..')
from vec2d import vec2d
v = vec2d(-1, 1)
print (v.angle)
| 2.25 | 2 |
tarakania_rpg/db/redis.py | tarakania/discord-bot | 1 | 12799031 | from typing import Any, Dict
from asyncio import sleep
from logging import getLogger
import aioredis
log = getLogger(__name__)
class _ConnectionsPool(aioredis.ConnectionsPool):
def __init__(
self, *args: Any, retry_count: int = 5, retry_interval: int = 2, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self._retry_count = retry_count
self._retry_interval = retry_interval
async def execute(self, command: str, *args: Any, **kwargs: Any) -> Any:
exc: Exception
for i in range(self._retry_count):
try:
return await super().execute(command, *args, **kwargs)
except (
aioredis.ConnectionClosedError,
aioredis.PoolClosedError,
ConnectionRefusedError,
) as e:
log.debug(
f"Command {command} failed, remaining attempts: {self._retry_count - i}"
)
exc = e
await sleep(self._retry_interval)
log.error(f"Command {command} has failed after {self._retry_count} retries")
raise exc
async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool:
config = config.copy()
address = (config.pop("host"), config.pop("port"))
return await aioredis.create_pool(address, pool_cls=_ConnectionsPool, **config)
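# Usage sketch, inside an async function (the config keys mirror what create_redis_pool
# pops above; any extra keys are passed through to aioredis.create_pool):
#
#   pool = await create_redis_pool({"host": "localhost", "port": 6379})
#   await pool.execute("SET", "greeting", "hello")
#   value = await pool.execute("GET", "greeting")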
| 2.34375 | 2 |
train.py | AlexMetsai/pytorch-time-series-autoencoder | 0 | 12799032 | # <NAME>
# <EMAIL>
# MIT License
# As-simple-as-possible training loop for an autoencoder.
import torch
import numpy as np
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from model.shallow_autoencoder import ConvAutoencoder
# load model definition
model = ConvAutoencoder()
model = model.double() # tackles a type error
# define loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Toy data:
# Using separate input and output variables to cover all cases,
# since Y could differ from X (e.g. for denoising autoencoders).
X = np.random.random((300, 1, 100))
Y = X
# prepare pytorch dataloader
dataset = TensorDataset(torch.tensor(X), torch.tensor(Y))
dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
# Training loop
for epoch in range(200):
for x, y in dataloader:
optimizer.zero_grad()
# forward and backward pass
out = model(x)
loss = criterion(out, y)
loss.backward()
optimizer.step()
print(loss.item()) # loss should be decreasing
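# After training, the weights can be saved and the autoencoder reused for
# reconstruction (a short sketch):
#
#   torch.save(model.state_dict(), "conv_autoencoder.pt")
#   with torch.no_grad():
#       reconstruction = model(torch.tensor(X[:1]))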
| 3.109375 | 3 |
regtests/bench/copy_list-typed-stack.py | secureosv/pythia | 17 | 12799033 | <reponame>secureosv/pythia<filename>regtests/bench/copy_list-typed-stack.py
'''copy list micro benchmark'''
from time import clock
from runtime import *
with stack:
def copy_list( a:[]int, n ) -> [][]int:
x = [][]int()
for i in range(n):
b = a[:]
for j in range(10):
b.push_back( j )
x.push_back( b )
return x
def test():
a = range(1000)
times = []double()
for i in range(4):
t0 = clock()
res = copy_list(addr(a), 10000)
tk = clock()
times.append(tk - t0)
avg = sumd(times) / len(times)
print(avg)
def main():
test()
main() | 2.453125 | 2 |
examples/expander.py | furimu1234/discord-ext-utils | 1 | 12799034 | <reponame>furimu1234/discord-ext-utils
from discord.ext.utils import MinimalExpander
from discord.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned)
# Expand simple.
bot.load_extension("discord.ext.utils.cogs.minimal_expander")
# Expand all.
# self.load_extension("discord.ext.utils.cogs.expander")
# Expand all and send with webhook.
# self.load_extension("discord.ext.utils.cogs.webhook_expander")
class Expander(MinimalExpander):
async def check_global_expand(self, guild_id):
# guild_id is the id of the guild that has the message to expand.
# If this function returns True, allow the message to be expanded to another guild.
pass
# Customize the behavior of the deployment.
# All Cogs are named ExpanderCog.
bot.get_cog("ExpanderCog").expander = Expander
bot.run("TOKEN")
| 2.234375 | 2 |
apps/quotes/quote_fetcher/quote_fetcher.py | Ev1dentSnow/ArtemisAPI_django | 0 | 12799035 | <gh_stars>0
import json
import requests
from apps.quotes import models
def fetch_quote():
pass
| 1.140625 | 1 |
standalone.py | askehill/covis2 | 0 | 12799036 | <reponame>askehill/covis2
#! /usr/bin/python3
import seeed_mlx9064x
import time,board,busio
import numpy as np
import adafruit_mlx90640
import matplotlib.pyplot as plt
from scipy import ndimage
import argparse
parser = argparse.ArgumentParser(description='Thermal Camera Program')
parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false',
const='imageMirror', help='Flip the image for selfie (default: false)')
args = parser.parse_args()
imageMirror = args.imageMirror
if(imageMirror == 'false'):
print('Mirror mode: false')
else:
imageMirror = 'true'
print('Mirror mode: true')
mlx = seeed_mlx9064x.grove_mxl90641()
frame = [0] * 192
mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ
mlx_shape = (12,16) # mlx90641 shape
mlx_interp_val = 10 # interpolate # on each dimension
mlx_interp_shape = (mlx_shape[0]*mlx_interp_val,
mlx_shape[1]*mlx_interp_val) # new shape
fig = plt.figure(figsize=(12,9)) # start figure
ax = fig.add_subplot(111) # add subplot
fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding
therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none',
cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image
cbar = fig.colorbar(therm1) # setup colorbar
cbar.set_label('Temperature [$^{\circ}$C]',fontsize=14) # colorbar label
fig.canvas.draw() # draw figure to copy background
ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background
ax.text(-75, 125, 'Max:', color='red')
textMaxValue = ax.text(-75, 150, 'test1', color='black')
fig.show() # show the figure before blitting
def plot_update():
fig.canvas.restore_region(ax_background) # restore background
mlx.getFrame(frame) # read mlx90640
data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data
if(imageMirror == 'true'):
data_array = np.flipud(data_array)
data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate
therm1.set_array(data_array) # set data
therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds
cbar.on_mappable_changed(therm1) # update colorbar range
plt.pause(0.001)
ax.draw_artist(therm1) # draw new thermal image
textMaxValue.set_text(str(np.round(np.max(data_array), 1)))
fig.canvas.blit(ax.bbox) # draw background
fig.canvas.flush_events() # show the new image
fig.show()
return
t_array = []
while True:
t1 = time.monotonic() # for determining frame rate
try:
plot_update() # update plot
except:
continue
# approximating frame rate
t_array.append(time.monotonic()-t1)
if len(t_array)>10:
t_array = t_array[1:] # recent times for frame rate approx
print('Frame Rate: {0:2.1f}fps'.format(len(t_array)/np.sum(t_array)))
| 2.171875 | 2 |
tloen/domain/transports.py | josiah-wolf-oberholtzer/tloen | 3 | 12799037 | import asyncio
import dataclasses
import enum
from typing import Dict, Optional, Set, Tuple
from supriya.clocks import AsyncTempoClock, Moment
from ..bases import Event
from .bases import ApplicationObject
from .parameters import ParameterGroup, ParameterObject
class Transport(ApplicationObject):
### CLASS VARIABLES ###
class EventType(enum.IntEnum):
CHANGE = 0
SCHEDULE = 1
MIDI_PERFORM = 2
DEVICE_NOTE_OFF = 3
DEVICE_NOTE_ON = 4
CLIP_LAUNCH = 5
CLIP_EDIT = 6
CLIP_PERFORM = 7
### INITIALIZER ###
def __init__(self):
ApplicationObject.__init__(self)
self._parameter_group = ParameterGroup()
self._parameters: Dict[str, ParameterObject] = {}
self._clock = AsyncTempoClock()
self._dependencies: Set[ApplicationObject] = set()
self._mutate(slice(None), [self._parameter_group])
self._tick_event_id = None
### PRIVATE METHODS ###
async def _application_perform_callback(self, clock_context, midi_message):
await self.application.perform(
[midi_message], moment=clock_context.current_moment
)
@classmethod
async def _deserialize(cls, data, transport_object):
await transport_object.set_tempo(data["spec"]["tempo"])
await transport_object.set_time_signature(*data["spec"]["time_signature"])
def _serialize(self):
return {
"kind": type(self).__name__,
"spec": {
"tempo": self._clock.beats_per_minute,
"time_signature": list(self._clock.time_signature),
},
}
def _tick_callback(self, clock_context):
self.application.pubsub.publish(TransportTicked(clock_context.desired_moment))
return 1 / clock_context.desired_moment.time_signature[1] / 4
### PUBLIC METHODS ###
async def cue(self, *args, **kwargs) -> int:
return self._clock.cue(*args, **kwargs)
async def cancel(self, *args, **kwargs) -> Optional[Tuple]:
return self._clock.cancel(*args, **kwargs)
async def perform(self, midi_messages):
if (
self.application is None
or self.application.status != self.application.Status.REALTIME
):
return
self._debug_tree(
self, "Perform", suffix=repr([type(_).__name__ for _ in midi_messages])
)
await self.schedule(self._application_perform_callback, args=midi_messages)
if not self.is_running:
await self.start()
async def reschedule(self, *args, **kwargs) -> Optional[int]:
return self._clock.reschedule(*args, **kwargs)
async def schedule(self, *args, **kwargs) -> int:
return self._clock.schedule(*args, **kwargs)
async def set_tempo(self, beats_per_minute: float):
self._clock.change(beats_per_minute=beats_per_minute)
async def set_time_signature(self, numerator, denominator):
self._clock.change(time_signature=[numerator, denominator])
async def start(self):
async with self.lock([self]):
self._tick_event_id = await self.cue(self._tick_callback)
await asyncio.gather(*[_._start() for _ in self._dependencies])
await self._clock.start()
self.application.pubsub.publish(TransportStarted())
async def stop(self):
await self._clock.stop()
async with self.lock([self]):
await asyncio.gather(*[_._stop() for _ in self._dependencies])
await self.application.flush()
await self.cancel(self._tick_event_id)
self.application.pubsub.publish(TransportStopped())
### PUBLIC PROPERTIES ###
@property
def clock(self):
return self._clock
@property
def is_running(self):
return self._clock.is_running
@property
def parameters(self):
return self._parameters
@dataclasses.dataclass
class TransportStarted(Event):
pass
@dataclasses.dataclass
class TransportStopped(Event):
pass
@dataclasses.dataclass
class TransportTicked(Event): # TODO: ClipView needs to know start delta
moment: Moment
| 2.09375 | 2 |
django/app/hivernants/admin.py | jeanpommier/ker51 | 0 | 12799038 | <gh_stars>0
from django.contrib.gis import admin
from .models import Hivernant, Phone, Email
class PhoneInline(admin.TabularInline):
model = Phone
extra = 1
class EmailInline(admin.TabularInline):
model = Email
extra = 1
class HivernantAdmin(admin.OSMGeoAdmin):
default_lon = 300000
default_lat = 6000000
# default_lon = 2
# default_lat = 42
default_zoom = 2
list_display = ('names', 'fulladdress', 'comments')
inlines = [PhoneInline, EmailInline]
admin.site.register(Hivernant, HivernantAdmin) | 1.796875 | 2 |
sagas/ofbiz/rpc_artifacts.py | samlet/stack | 3 | 12799039 | <reponame>samlet/stack
from concurrent import futures
import time
import grpc
import requests
import json
import blueprints_pb2_grpc
import blueprints_pb2
from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
"""
$ start as
# or: python -m sagas.ofbiz.rpc_artifacts
"""
#$ ./query.py talk "hi"
#$ ./query.py talk "/joke"
def talk(sender, a_str):
headers = {
'content-type': 'application/json',
}
# data = '{'+'"message": "{}"'.format(a_str)+'}'
data={'sender':sender, 'message':a_str}
response = requests.post('http://localhost:5005/webhooks/rest/webhook',
headers=headers, data=json.dumps(data))
# print(response.text)
return response.json()
class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer):
def Talk(self, request, context):
rs=talk(request.sender, request.message)
recips = []
for r in rs:
recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text']))
return blueprints_pb2.BotResponse(recipients=recips)
def SetSlot(self, request, context):
return super().SetSlot(request, context)
def __init__(self, artifacts):
self.artifacts=artifacts
def Ping(self, request, context):
print('get %s'%request)
self.artifacts.blackboard.send(request.message)
return blueprints_pb2.PingResponse(response="world")
class Artifacts(object):
def __init__(self, port='0.0.0.0:20051'):
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server)
self.port=port
self.server.add_insecure_port(port)
self.blackboard=Blackboard()
# self.receiver=BlackboardReceiver()
def serve(self, blocking=True):
self.server.start()
print(".. artifacts servant started on", self.port)
if blocking:
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
self.server.stop(0)
self.blackboard.close()
# else:
# self.receiver.serve('anonymous.info')
if __name__ == '__main__':
# import asyncio
# from sagas.tests.bus.aio.receive_logs_topic import main
# # message receiver
# loop = asyncio.get_event_loop()
# loop.create_task(main(loop))
# we enter a never-ending loop that waits for
# data and runs callbacks whenever necessary.
print(" [*] Waiting for messages ..")
# rpc and message sender
s=Artifacts()
s.serve()
# loop.run_forever()
| 2.203125 | 2 |
1 - Beginner/1789.py | andrematte/uri-submissions | 1 | 12799040 | <filename>1 - Beginner/1789.py
# URI Online Judge 1789
while True:
try:
N = int(input())
entrada = [int(i) for i in input().split()]
if max(entrada) < 10:
print(1)
elif max(entrada) >= 10 and max(entrada) < 20:
print(2)
elif max(entrada) >= 20:
print(3)
except EOFError:
break | 3.6875 | 4 |
database/postgresql.py | andreformento/querido-diario-data-processing | 3 | 12799041 | <filename>database/postgresql.py
from typing import Generator
import os
import logging
import psycopg2
from tasks import DatabaseInterface
def get_database_name():
return os.environ["POSTGRES_DB"]
def get_database_user():
return os.environ["POSTGRES_USER"]
def get_database_password():
return os.environ["POSTGRES_PASSWORD"]
def get_database_host():
return os.environ["POSTGRES_HOST"]
def get_database_port():
return os.environ["POSTGRES_PORT"]
def create_database_interface() -> DatabaseInterface:
return PostgreSQL(
get_database_host(),
get_database_name(),
get_database_user(),
get_database_password(),
get_database_port(),
)
class PostgreSQL(DatabaseInterface):
SELECT_PENDING_GAZETTES = """SELECT gazettes.id,
gazettes.source_text,
gazettes.date,
gazettes.edition_number,
gazettes.is_extra_edition,
gazettes.power,
gazettes.file_checksum,
gazettes.file_path,
gazettes.file_url,
gazettes.scraped_at,
gazettes.created_at,
gazettes.territory_id,
gazettes.processed,
territories.name as territory_name,
territories.state_code
FROM gazettes
INNER JOIN territories ON territories.id = gazettes.territory_id
WHERE processed is False;"""
UPDATE_GAZETTE_AS_PROCESSED = """UPDATE gazettes
SET processed = True
WHERE id = %(id)s
AND file_checksum = %(file_checksum)s;"""
def __init__(self, host, database, user, password, port):
self._connection = psycopg2.connect(
dbname=database, user=user, password=password, host=host, port=port
)
def format_gazette_data(self, data):
return {
"id": data[0],
"source_text": data[1],
"date": data[2],
"edition_number": data[3],
"is_extra_edition": data[4],
"power": data[5],
"file_checksum": data[6],
"file_path": data[7],
"file_url": data[8],
"scraped_at": data[9],
"created_at": data[10],
"territory_id": data[11],
"processed": data[12],
"territory_name": data[13],
"state_code": data[14],
}
def get_pending_gazettes(self) -> Generator:
with self._connection.cursor() as cursor:
cursor.execute(self.SELECT_PENDING_GAZETTES)
logging.debug(cursor.query)
for gazette_data in cursor:
logging.debug(gazette_data)
yield self.format_gazette_data(gazette_data)
logging.debug("No more gazettes to be processed")
def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None:
logging.debug(f"Marking {id}({gazette_file_checksum}) as processed")
with self._connection.cursor() as cursor:
cursor.execute(
self.UPDATE_GAZETTE_AS_PROCESSED,
{"id": id, "file_checksum": gazette_file_checksum},
)
self._connection.commit()
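# Usage sketch (requires the POSTGRES_* environment variables read above):
#
#   database = create_database_interface()
#   for gazette in database.get_pending_gazettes():
#       # ...extract text, upload, etc...
#       database.set_gazette_as_processed(gazette["id"], gazette["file_checksum"])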
| 2.828125 | 3 |
hard-gists/df9ad5ef01d16c02d68d9a8c17c75b73/snippet.py | jjhenkel/dockerizeme | 21 | 12799042 |
#
# twitter csv process
# write by @jiyang_viz
#
# require:
# https://github.com/edburnett/twitter-text-python
#
# download csv file from:
# https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump
#
import json
import csv
from ttp import ttp
from dateutil import parser as date_parser
# read csv to Dict
with open('realdonaldtrump.csv', 'r') as f:
reader = csv.DictReader(f, delimiter = ',')
data = list(reader)
# write to json file (same fields as csv)
with open('realdonaldtrump.json', 'w') as f:
for item in data:
f.write(json.dumps(item) + '\n')
# get more info from text message
parser = ttp.Parser()
for item in data:
result = parser.parse(item['text'])
item['tags'] = result.tags
item['users'] = result.users
item['reply'] = result.reply
item['tweet_time'] = str(date_parser.parse(item['created_at']))
# write to json file (more fields)
with open('realdonaldtrump_more.json', 'w') as f:
for item in data:
f.write(json.dumps(item) + '\n') | 3.234375 | 3 |
thebotplatform.py | TheBotPlatform/POCInteractionEndPoint | 0 | 12799043 | <filename>thebotplatform.py
import requests
import json
from decouple import config
import time
#Creating and getting my bearer token
LastToken = False
LastTokenTime = False
TokenLifespan = 58 * 60
def BearerTokenGrab():
global LastToken
global LastTokenTime
now = time.time()
if LastToken and LastTokenTime > now - TokenLifespan:
return LastToken
url = "https://api.thebotplatform.com/oauth2/token"
payload = "client_id=" + config("TBP_CLIENT_ID") + "&client_secret=" + config("TBP_CLIENT_SECRET") + "&grant_type=client_credentials"
headers = {
"Accept": "application/json",
"Content-Type": "application/x-www-form-urlencoded"
}
response = requests.post(url, data=payload, headers=headers)
jsonResponse = response.json()
LastToken = jsonResponse['access_token']
LastTokenTime = now
return jsonResponse['access_token']
#Creates a User ID for the current user
def CreateUserID():
url = "https://api.thebotplatform.com/v1.0/interaction/user"
payload = []
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Bearer " + BearerTokenGrab()
}
response = requests.post(url, data=payload, headers=headers)
jsonResponse = response.json()
return jsonResponse['data']['attributes']['user']['id']
def getBotResponse(UserID, input):
url = "https://api.thebotplatform.com/v1.0/interaction"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + BearerTokenGrab()
}
payloaddict = { "data": { "type": "interaction", "attributes": { "user": { "id": UserID }, "input": input } } }
response = requests.post(url, headers=headers, json=payloaddict)
jsonResponse = response.json()
formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True)
return formatJsonresp
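# Usage sketch (requires TBP_CLIENT_ID and TBP_CLIENT_SECRET to be resolvable by
# python-decouple, e.g. via a .env file):
#
#   user_id = CreateUserID()
#   print(getBotResponse(user_id, "Hello"))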
print("Running the proxy server") | 3.078125 | 3 |
freeCodeCamp/01-scientific-computing-with-python/src/02-functions.py | aysedemirel/python-journey | 1 | 12799044 | <gh_stars>1-10
def sum(a,b):
return a+b
a = input("Enter first number: ")
b = input("Enter second number: ")
sum_numbers = sum(int(a),int(b))
print("Sum: ", sum_numbers) | 3.6875 | 4 |
batch_generator.py | mr4msm/cramer_gan_chainer | 3 | 12799045 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Batch generator definition."""
import cv2
import numpy as np
class ImageBatchGenerator(object):
"""Batch generator for training on general images."""
def __init__(self, input_files, batch_size, height, width, channel=3,
shuffle=False, flip_h=False):
assert batch_size > 0, batch_size
assert channel == 3 or channel == 1, channel
if channel == 3:
self._imread_flag = cv2.IMREAD_COLOR
else:
self._imread_flag = cv2.IMREAD_GRAYSCALE
self._input_files = input_files
self._batch_size = batch_size
self._height = height
self._width = width
self._shuffle = shuffle
self._flip_h = flip_h
for ifile in input_files:
image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED)
assert isinstance(image, np.ndarray)
assert image.shape[:2] == (
height, width), (image.shape[:2], (height, width))
print('verify ' + ifile)
self._batch_generator = self.__get_batch_generator()
def __get_batch_generator(self):
batch = []
while True:
if self._shuffle:
file_index = np.random.permutation(self.n_samples)
else:
file_index = range(self.n_samples)
for idx in file_index:
image = cv2.imread(self._input_files[idx], self._imread_flag)
if self._flip_h:
if np.random.randint(2) == 0:
image = image[:, ::-1]
if image.ndim == 2:
image = image.reshape((1,) + image.shape)
else:
image = image.transpose((2, 0, 1))
image = image.astype(np.float32)
image = ((image / 255.) - 0.5) * 2.
batch.append(image)
if len(batch) == self._batch_size:
yield np.asarray(batch)
batch = []
@property
def n_samples(self):
return len(self._input_files)
def __next__(self):
self._batch = next(self._batch_generator)
return self._batch
def next(self):
return self.__next__()
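# A usage sketch for ImageBatchGenerator (assuming the class above is in scope),
# with a directory of images already sized to 64x64 with 3 channels; the glob
# pattern, batch size and dimensions are illustrative.
from glob import glob

files = sorted(glob('images/*.png'))
gen = ImageBatchGenerator(files, batch_size=16, height=64, width=64,
                          channel=3, shuffle=True, flip_h=True)

# Batches are float32 arrays of shape (16, 3, 64, 64), scaled to [-1, 1].
batch = next(gen)
print(batch.shape, batch.dtype)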
| 3.015625 | 3 |
libraries/graphite2-1.3.12/tests/corrupt.py | myzhang1029/zmymingw | 0 | 12799046 | #!/usr/bin/env python
import optparse
import os
import shutil
import sys
def revert(path):
bkup = path + os.path.extsep + options.backup_suffix
if os.access(bkup, os.R_OK):
shutil.copy2(bkup, path)
os.remove(bkup)
def corrupt(path, offset, value):
if options.backup:
shutil.copy2(path, path + os.path.extsep + options.backup_suffix)
with open(path, 'r+b', buffering=0) as f:
f.seek(offset)
        f.write(bytearray([value]))  # write a single byte; works under both Python 2 and 3
parser = optparse.OptionParser(
usage='usage: %prog file byte-offset replacment-value')
parser.add_option("", "--revert",
action="store_true", default=False,
help="restore the path to pristine condition if possible.")
parser.add_option("-b", "--backup",
action="store_true", default=True, dest="backup",
help="create a backup of the uncorrupted original"
" [default: %default]")
parser.add_option("", "--no-backup",
action="store_false", dest="backup",
help="do not create a backup of the uncorrupted original.")
parser.add_option("", "--backup-suffix",
type="string", default="pristine",
help="suffix for uncorrupted copy of the file"
" [default: %default]")
(options, args) = parser.parse_args()
if options.revert:
if len(args) != 1:
parser.print_help()
sys.exit(1)
elif len(args) != 3:
parser.print_help()
sys.exit(1)
path = args[0]
revert(path)
if not options.revert:
offset = int(eval(args[1]))
value = int(eval(args[2]))
corrupt(path, offset, value)
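# Illustrative invocations of this script via subprocess, corrupting one byte of
# a file and then restoring it; sample.ttf is a placeholder name and the
# offset/value are arbitrary.
import subprocess
import sys

# Flip the byte at offset 0x10 of sample.ttf to 0xFF (a .pristine backup is kept) ...
subprocess.check_call([sys.executable, 'corrupt.py', 'sample.ttf', '0x10', '0xFF'])
# ... then restore the original from the backup.
subprocess.check_call([sys.executable, 'corrupt.py', '--revert', 'sample.ttf'])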
| 2.921875 | 3 |
parser.py | veer66/vtimeline | 0 | 12799047 | <reponame>veer66/vtimeline
#-*- coding: UTF-8 -*-
#
# Copyright 2009 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import re
def parse_date(txt):
pat = re.compile('^(?P<day>\d+)/(?P<month>\d+)/(?P<year>\d+)$')
m = pat.match(txt)
if m:
d = m.groupdict()
d_ = {}
for k, v in d.items():
d_[k] = int(v)
d = d_
else:
pat2 = re.compile('^(?P<month>\d+)/(?P<year>\d+)$')
m2 = pat2.match(txt)
if m2:
d = m2.groupdict()
d_ = {}
for k, v in d.items():
d_[k] = int(v)
d = d_
d['day'] = None
else:
raise RuntimeError, "cannot parse date" + " :" + txt + ":"
return d
def parse_activity(m, statements):
desc = m.group(2).strip()
statements.append({'type': 'activity', 'desc': desc})
def parse_act_start(m):
date = parse_date(m.groupdict()['date'].strip())
return ('start', date)
def parse_act_end(m):
date = parse_date(m.groupdict()['date'].strip())
return ('end', date)
def parse_act_complete(m):
percent = int(m.groupdict()['percent'].strip())
return ('complete', percent)
def parse_activity_detail(m, statements):
pats = [(u"^ตั้งแต่(?P<date>.+)", parse_act_start),
(u"^ถึง(?P<date>.+)", parse_act_end),
(u"^สำเร็จร้อยละ\s*(?P<percent>\d+)", parse_act_complete)]
pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats)
result = None
for pat in pats:
m_ = pat[0].match(m.group(2).strip())
if m_:
result = pat[1](m_)
break
if result is None:
raise RuntimeError, "Cannot parse activity detail"
if len(statements) == 0 or statements[-1]['type'] != 'activity':
raise RuntimeError, "There is no activity before activity detail"
statements[-1][result[0]] = result[1]
def parse_start(m, statements):
statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())})
def parse_end(m, statements):
statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())})
def parse_resolution(m, statements):
m_ = re.match(u"^(\d+)\s+เดือน$", m.group(2).strip())
if m_:
statements.append({'type': 'resolution',
'day': 0,
'year': 0,
'month': int(m_.group(1))})
else:
raise RuntimeError, "Cannot parse resolution"
def parse_width(m, statements):
m_ = re.match(u"^(\d+)$", m.group(2).strip())
if m_:
statements.append({'type': 'width', 'value': int(m_.group(1))})
else:
raise RuntimeError, "Cannot parse width"
def parse_first_column_width(m, statements):
m_ = re.match(u"^(\d+)$", m.group(2).strip())
if m_:
statements.append({'type': '1st_col_width',
'value': int(m_.group(1))})
else:
raise RuntimeError, "Cannot parse first column width"
def parse(txt):
start_pats = [(u"(^กิจกรรม)(.+)", parse_activity) ,
(u"(^\*)(.+)", parse_activity_detail),
(u"(^เริ่ม)(.+)", parse_start),
( u"(^สิ้นสุด)(.+)", parse_end ),
(u"(^ความละเอียด)(.+)", parse_resolution),
(u"(^ช่องกว้าง)(.+)", parse_width),
(u"(^ช่องแรกกว้าง)(.+)", parse_first_column_width)]
start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats)
statements = []
for line in txt.replace("\r", "").split("\n"):
line = line.strip()
if line != '':
for pat in start_pats:
m = pat[0].match(line)
if m:
pat[1](m, statements)
break
return statements
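# A usage sketch for parse() as defined above, with input built from the Thai
# keywords the patterns recognise; the dates and activity text are invented.
# Like the module itself this targets Python 2, and the non-ASCII literals rely
# on the utf-8 coding declaration at the top of the file.
sample = u"""
เริ่ม 1/10/2552
สิ้นสุด 30/9/2553
ความละเอียด 1 เดือน
กิจกรรม Design
* ตั้งแต่ 1/10/2552
* ถึง 31/12/2552
* สำเร็จร้อยละ 50
"""

# Prints the parsed statements: start, end, resolution and one activity
# (with its detail lines folded into the activity dict).
for statement in parse(sample):
    print statement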
| 2.453125 | 2 |
tests/telnet.py | cmu-sei/usersim | 10 | 12799048 | # Copyright 2017 <NAME>. See LICENSE.md file for terms.
import socket
import threading
import api
import usersim
TCP_IP = 'localhost'
TCP_PORT = 5005
def run_test():
telnet_config = {'type': 'telnet',
'config': {'host': TCP_IP,
'username': 'admin',
'password': 'password',
'commandlist': ['printstuff', 'do other stuff', 'do this thing'],
'port': TCP_PORT}}
sim = usersim.UserSim(True)
task_id = api.validate_config(telnet_config)
def start_server():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print('Connection Address: ' + str(addr))
while True:
data = conn.recv(20)
if not data:
break
print('received data: ' + str(data))
conn.send(data)
conn.close()
if __name__ == '__main__':
run_test()
| 2.453125 | 2 |
polichart/api/views.py | cjmabry/PoliChart | 0 | 12799049 | <filename>polichart/api/views.py
# -*- coding: utf-8 -*-
from flask import Blueprint, current_app, request, jsonify
from flask.ext.login import login_user, current_user, logout_user
from ..extensions import db
from polichart import models
api = Blueprint('api', __name__, url_prefix='/api')
| 1.601563 | 2 |
guillotina/contrib/swagger/services.py | Qiwn/guillotina | 0 | 12799050 | import copy
import json
import os
from urllib.parse import urlparse
import pkg_resources
from guillotina import app_settings
from guillotina import configure
from guillotina.api.service import Service
from guillotina.component import getMultiAdapter
from guillotina.interfaces import IAbsoluteURL
from guillotina.utils import get_authenticated_user
from guillotina.utils import get_full_content_path
from guillotina.utils import get_request_scheme
from guillotina.utils import get_security_policy
from guillotina.utils import resolve_dotted_name
from zope.interface import Interface
from zope.interface.interfaces import ComponentLookupError
here = os.path.dirname(os.path.realpath(__file__))
@configure.service(
method="GET",
context=Interface,
name="@swagger",
permission="guillotina.swagger.View",
ignore=True,
)
class SwaggerDefinitionService(Service):
__allow_access__ = True
def get_data(self, data):
if callable(data):
data = data(self.context)
return data
def load_swagger_info(self, api_def, path, method, tags, service_def):
path = path.rstrip("/")
if path not in api_def:
api_def[path or "/"] = {}
desc = self.get_data(service_def.get("description", ""))
swagger_conf = service_def.get("swagger", {})
if swagger_conf.get("display_permission", True):
if desc:
desc += f" 〜 permission: {service_def['permission']}"
else:
desc += f"permission: {service_def['permission']}"
api_def[path or "/"][method.lower()] = {
"tags": swagger_conf.get("tags", [""]) or tags,
"parameters": self.get_data(service_def.get("parameters", {})),
"produces": self.get_data(service_def.get("produces", [])),
"summary": self.get_data(service_def.get("summary", "")),
"description": desc,
"responses": self.get_data(service_def.get("responses", {})),
}
def get_endpoints(self, iface_conf, base_path, api_def, tags=None):
tags = tags or []
for method in iface_conf.keys():
if method == "endpoints":
for name in iface_conf["endpoints"]:
self.get_endpoints(
iface_conf["endpoints"][name],
os.path.join(base_path, name),
api_def,
tags=[name.strip("@")],
)
else:
if method.lower() == "options":
continue
service_def = iface_conf[method]
swagger_conf = service_def.get("swagger", {})
if (service_def.get("ignore") or
service_def.get("swagger_ignore") or swagger_conf.get("ignore")):
continue
if not self.policy.check_permission(
service_def["permission"], self.context
):
continue
for sub_path in [""] + swagger_conf.get("extra_paths", []):
path = os.path.join(base_path, sub_path)
if "traversed_service_definitions" in service_def:
trav_defs = service_def[
"traversed_service_definitions"
]
if isinstance(trav_defs, dict):
for sub_path, sub_service_def in trav_defs.items():
sub_service_def["permission"] = service_def[
"permission"
]
self.load_swagger_info(
api_def,
os.path.join(path, sub_path),
method,
tags,
sub_service_def,
)
else:
self.load_swagger_info(
api_def, path, method, tags, service_def
)
async def __call__(self):
user = get_authenticated_user()
self.policy = get_security_policy(user)
definition = copy.deepcopy(
app_settings["swagger"]["base_configuration"]
)
vhm = self.request.headers.get("X-VirtualHost-Monster")
if vhm:
parsed_url = urlparse(vhm)
definition["host"] = parsed_url.netloc
definition["schemes"] = [parsed_url.scheme]
definition["basePath"] = parsed_url.path
else:
definition["host"] = self.request.host
definition["schemes"] = [get_request_scheme(self.request)]
if 'version' not in definition['info']:
definition["info"]["version"] = pkg_resources.get_distribution(
"guillotina"
).version
api_defs = app_settings["api_definition"]
path = get_full_content_path(self.context)
for dotted_iface in api_defs.keys():
iface = resolve_dotted_name(dotted_iface)
if iface.providedBy(self.context):
iface_conf = api_defs[dotted_iface]
self.get_endpoints(iface_conf, path, definition["paths"])
definition["definitions"] = app_settings["json_schema_definitions"]
return definition
AUTH_HTML = '''
<form id='api_selector'>
<div id="auth_container">
<div>
<a class="authorize__btn" href="#">Authorize</a>
</div>
</div>
</form>
'''
@configure.service(
method="GET",
context=Interface,
name="@docs",
permission="guillotina.swagger.View",
ignore=True,
)
async def render_docs_index(context, request):
if app_settings['swagger'].get('index_html'):
index_file = app_settings['swagger']['index_html']
else:
index_file = os.path.join(here, "index.html")
with open(index_file) as fi:
html = fi.read()
swagger_settings = app_settings["swagger"]
url = swagger_settings["base_url"] or request.headers.get(
"X-VirtualHost-Monster"
)
if url is None:
try:
url = getMultiAdapter((context, request), IAbsoluteURL)()
except ComponentLookupError:
url = "{}://{}".format(get_request_scheme(request), request.host)
swagger_settings["initial_swagger_url"] = url
if swagger_settings['authentication_allowed']:
auth = AUTH_HTML
else:
auth = ''
return html.format(
app_settings=app_settings,
request=request,
swagger_settings=json.dumps(swagger_settings),
base_url=url,
static_url="{}/swagger_static/".format(url if url != "/" else ""),
auth=auth,
title=swagger_settings['base_configuration']['info']['title']
)
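# A hedged sketch of the swagger-related app_settings this module reads
# (base_configuration, base_url, authentication_allowed, index_html); the key
# names come from the code above, while the values shown are placeholders
# rather than guillotina's actual defaults.
swagger_settings_sketch = {
    "swagger": {
        "base_configuration": {
            "swagger": "2.0",
            "info": {"title": "My API", "description": "", "version": "1.0.0"},
            "host": "",
            "basePath": "",
            "schemes": [],
            "paths": {},
            "definitions": {},
        },
        "base_url": None,                # falls back to X-VirtualHost-Monster / the request URL
        "authentication_allowed": True,  # controls whether the Authorize button is rendered
        "index_html": None,              # custom docs page; the bundled index.html is used if unset
    }
}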
| 2.046875 | 2 |